~/f/dealii/RPMS.2017 ~/f/dealii ~/f/dealii RPMS.2017/deal_II-devel-9.6.1-1.1.x86_64.rpm RPMS/deal_II-devel-9.6.1-1.1.x86_64.rpm differ: byte 225, line 1 Comparing deal_II-devel-9.6.1-1.1.x86_64.rpm to deal_II-devel-9.6.1-1.1.x86_64.rpm comparing the rpm tags of deal_II-devel --- old-rpm-tags +++ new-rpm-tags @@ -10399 +10399 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html 6c0f76d55e8683bad0a11859238b02c00339fd02532f16ebcdc9bdc84ee454ea 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html a66882cc70744f73c3bc61cff941fea136f961061cb78b8b74d26be5f822a171 2 @@ -10402,3 +10402,3 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html dce5944c26deda6366b011a73f34be67edfb41b74074efb19620787492bfb831 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas.tex f6ad8d68b8aa18872fbb1b134c14137af92f64944f5242889a2363488079da83 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas_dark.tex 81f2ce02eea5cd0ec9e73290e434e6b02fe3557dac7f93b24a269cba22ca1158 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html b9e6774e374b160e14775de73b7b91fcad3d6f259d1cec7b4ca41ec4c38d4b79 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas.tex d722b22d5a54577814bdd651d30cceef9cd67db0e185f00ce1218f83ea6a1d9b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas_dark.tex 7e5c7e9d7084e8fc3099069ee5b7e4dd3e71586a3a22d09d529d5939158fd043 2 @@ -10574 +10574 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html 38978714de80f462581976eae50d8b44c9d6614ce19e0de7281e78332c125aeb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html 75e08e2764e235da58c01abc019bdd97fd727e6fe60c49722dcfc6ebc8af44ea 2 @@ -10576 +10576 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html 4d622e89675d3cdfec0dc96b0ee15778100b116e9b9367961cc55e95f7221203 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html a3b38240f60e3b4e562b7f50c1c5e931b24a3201a087370695c7ea38b00a9f07 2 @@ -10584 +10584 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html 9c806479efeb5a863da8c49ce7481c48a11562b74bf8382e00db27958c399b20 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html 2847317e74ccf84c91c54341c01f8eb6691b48738011ed4a2959d21ea665df08 2 @@ -10586 +10586 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_2_1_and_8_3.html 28769bff50556ecc25b001a4827523e6d3abc324f70969f49d0f7dd619cfdfe7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_2_1_and_8_3.html 7a041f638fdcb31f8eb89394493ff55a8010e5ce512aa8d59f2d03070be33c1e 2 @@ -10595 +10595 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html 44f025c0ef4293f5acda028700a53318abc9499e8ec0071b2418fd5f7473fb92 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html 83ca4e1fa686de89169df3ff452a3c1a31175e4d4d3cfa417905bb2473490f3b 2 @@ -10623 +10623 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html 54ed43a9f8b0ce831a3cf199774fcde5dabf75fd32fd359e41cb4bb7088ba5c5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html c1a00513490b7d44e1d1bbf5307eff296fcb37f4f4d9a14e8e93263f89f3f04f 2 @@ -10641 +10641 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html b58e32c9bb60a40c6b2342c4305d6ff136f9a8a96d77d0710a1d8ab05bd2d11d 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html 6ed9ebb45c3a8708f0203fc5c204650ee77ac3b5a36975379fcd6a1c00ba581d 2 @@ -10658 +10658 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html 3a24752f64486b94ab21d87f589fe05a5c52ca0d2f5713075bf6c44c16e83845 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html 74f51c11e3fa3358e713581e352b442701f922f4986cf82f5199463b0f11d357 2 @@ -10695 +10695 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html 45e156ee819fd859a953ff0c76749557d6d81afbe14eb089182b5dff49205b30 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html 334e9f5f360d7b382d0ffd47695a6fcbbd4dad0621a7779ee14b2b015c6bd64b 2 @@ -10698 +10698 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html da5d4d4020c2feefc6b76a49bfe843c8ce8cf8eb316ccba1a439d593849bb1af 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html 19509e503c036b5ea692eb1f56ad22cc5ac814d23e4459a0f9600f177ffc4f03 2 @@ -10701 +10701 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classAutoDerivativeFunction.html 450cc0b760922081823bc305d868a7298286a529678d951a9139e0e7ff45e12d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classAutoDerivativeFunction.html 1f797a955609c76f8ce0115602ad75bb215aa72c19267539de390d9236e59e7c 2 @@ -10704 +10704 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html 3262e214625b669e17461cf7b3551cb2ef23dd03615f2223c6afef55ceb025fa 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html a0fd628723c4b2d23d7183cebf0ddc4a44e8b5baa4fd39a6d8ec032e943e9df6 2 @@ -10711 +10711 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html 96c726047b607e1c9f85f4c2b1647514f15a692298f81d7813dfbfcc1b160ed5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html 815e9014ba747581c0ad9e6739727c3d023cb0b28bcb8b12a66ef9edc2af9016 2 @@ -10717 +10717 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html 096807c6824cff59376679b8cba8e9e2d3eaf14e59d2fd3ceb1d7a1f7d0e9144 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html 2b5f3835ef237705a0b21a7088788c545780513119dccfb0d5045fac919f4772 2 @@ -10723 +10723 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html a4424dad4240b46ba63f11d5b80ed47c35952049b5e5e98d93c0a856ec01db33 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html f23be64ee21d65e88f1d4f428e4bc8bf5222f8ec669116db0ed4811a7768644f 2 @@ -10728 +10728 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html 2b029ff2f540878a2a13402f922a6192d4d9ae99270fa0ed5739f3237f3ca439 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html 4976df2975d7fe95abd402f741e6959d910c02efd9704c1ed91d161a82bb4fb9 2 @@ -10741 +10741 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html 849ec17c89876faf687ac03060f1b3cade1c8af1d13f3b790f3e8ce08aee8cea 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html 342834c865be032ef8ed33b669b006d97c2f46932d379760476ff0766633179b 2 @@ -10743 +10743 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html a859eac6acff3b2df89433c9605eb38fc9d721c962e635df78c421705c7ed94b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html de8d02d15364c60d741e0d067fb410f72ac982b692f34a67a6d9cf8ae326b541 2 @@ -10753 +10753 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html 2a9125efe692526f7fc07e4ca66c17f88ddd0977b6e8d594569b17114b8b6fde 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html cf74f277c6dc501a9b3524e668e65ed0fe7690141d2282dff6ba7004d516b572 2 @@ -10755 +10755 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html 63794e1b3cf198770762628491cc5bfa98e729a5e06e73958248094d477c6922 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html d356a6b0c9fba0406abcf9b0eeb743479d653942e1bb0c05f39d714191ca0815 2 @@ -10759 +10759 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html 905c7b123a00735a38b603ef318b777d7df283d208a2b37ad51573060a86367f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html e0e3a863405a6277ecb6cb8786e21744563e600008d10a87bab57b02ba46e3c9 2 @@ -10773 +10773 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html cde5c4418c38ca6f476002b62f7285a3bbc2985dc5efc0fdc414b96d4584ccf4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html 8d645e95830cade47c99fa54951805288034608e9cd143012b135c59b1fd74ea 2 @@ -10786 +10786 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html d8d53518bbd4ad0f9a396023734d0255a62cdb07e557a4ce5e4bacbeed95eb15 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html 1036f64fa0033626c300a9dd8b40443a8257b5947edda5119adc0e18b646d995 2 @@ -10789 +10789 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html 97ed420d8cd992ec5c5bd3c41250110a3ae1b649e6663a84c086ccfca9343322 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html 5e38b2e19854a0cd41f0617fb418bd1ab55b72ae9e0574ddb7b7e4040408a44d 2 @@ -10805 +10805 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html 6c0586f5ecc691daafb4be1e3e4e92e20a519004de5efec717e2a93da620fa57 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html 5e997cd1b8d97f6f3e4f1e9ab9c9cf7088feacb28829f7aa7483fe2d124c04d3 2 @@ -10818 +10818 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html e004b7780e5548a1887a72486796f7bcd8c657773fe0ba1d14cdcb3b5243555a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html 368f981f8700e028fafbc80228ce49d2e628189f8a9caed0400373302decd8b9 2 @@ -10832 +10832 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html f9ee6bb19c5d14841fb1170f07aa9287eb00af10370c8d66a5c43ab9ca39af28 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html ff9b87dbf1bb59d55a9421b555bd4188f81b3d174fb50507207d007f683a6b03 2 @@ -10861 +10861 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html 7434fe63747ab85af53b935ea15f42ca29015ed764eaf6a5ec2cd80066dd5b0c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html 4db1d55bf0f12eb14f83c972d45364844e0adee1542783e5dc66a37c68b54e95 2 @@ -10866 +10866 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html 727fb7d5b30802a2551e046fbb9bfc45323e0e2321dd48bfbb479749fd2bd72a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html 97aa3e2b044912c311da7ae96f190f0ba789945a229ff8e663a738bb0b7937a4 2 @@ -10869 +10869 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorVector.html 6e003bb2e0d7e1315b9e3b86695f864d8557dd0810fc12abca81ee32e95923a9 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorVector.html 2fb962d1278dd6a5209be7bf16ea12cb39601a65635415019410e15a15a374f6 2 @@ -10886 +10886 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html 85bf5f6d20711361a80b5efdaa93fbaed843dcc3f8080874070b20da6faec0c2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html 6275f226fed2651f26ce4cf6b27900924d5b0827bae4f3e11c5c3273bfece1ab 2 @@ -10888 +10888 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 763bf2685572414cd1454575f3d6715d9261956a3fdb6dcfee58de3e827f8c9e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 8634539e7883a620ef53d361019a542c46f01c57b44b731b50058219d95e7cd0 2 @@ -10890 +10890 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html e40e94735669ce41a6706a7357f43e35902c5e7eacaacd200f2d904cd673456b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html c5199edd16288aab88677c79900d3c5a4e76c0f2ebc4a0ac460d88dee333cc0d 2 @@ -10899 +10899 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html 2091d5b243ab05c29b8a9c6801a4700013d3097305f00e7f5abd787d66abf82e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html 96657c50cb3c992bddfc794c2186c74b1f0c30a45b43f4cfb9edd2251c6fc9ff 2 @@ -10926 +10926 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html 527f9bbb7f77430523520776af87bc7cef4418fb0ac79d96fb353c856d62975c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html 0d3f7321e098c2304e018a0f2db9c5f1fea505f58e0d0edf24c25ead9782969c 2 @@ -10937 +10937 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html 3bae878e9ab1bcdec3f4dcdc5063b86e7304550261b23660112343e21ec48377 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html 08b33c851f148e387a60c65a9afb6daf44602098654ba87572d95ca9957a3ba9 2 @@ -10948 +10948 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html 19f88e160e970b06673623d8873c08d385e7b3a8374cbc109efe01685f0a9341 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html 6930334e8331571548b670f88b07dde96e2eec151a45503322371b13d4246740 2 @@ -10955 +10955 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html da9d76fd48f2ec8d7b35de79c4fa47032a99543482b57b391ca7a28d9d034be5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html 2c9a1b6e6bc6edd97b401a07bc912340ff49c906d6e0575f8ce27af8e90d6e8a 2 @@ -10958 +10958 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html efbc3f4cac7970b21d13069ac2d3c3d1423dab718defc88ee17edc78e8582c56 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html 93d447e637be6e64e1a6168d52ca3b710b49550e660e696ce9699649d9132944 2 @@ -10961 +10961 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html da61e22097118c56b5efaa37bcfc256a211fe34159179ab3fd9475e1004111f0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html aec83629198d0c0797be32e654f1cafbadedddd57c91cd280d2ebd14bdb12fa0 2 @@ -10969 +10969 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFECouplingValues.html 965f26979470b7601ec6aa764be0232895dcaeb59fbfdaede923319b841e8e61 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFECouplingValues.html 20b6830feaf13dd778241f811893ed1030bd9cf4a3f1b8c548b5f3d445345970 2 @@ -10971 +10971 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html 840846155c6ae69bdda4879d85ea73017df9d2b2680c73a1a31571a5275e117a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html 01bd5cf19d1d67f582609d4138070fc804b55e31aa6c18266d18a7679803c1b9 2 @@ -10973 +10973 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html cb8777f44d3ea52e799253bcee1ecec84b51f9e950cede6832032572c9736f29 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html 991bfbd3d14f2d1644521cbeddf58b22d31c0a1711bb1cddebaba09764eab486 2 @@ -10976 +10976 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html 331e3256576e38bcb6c3f5445dd5ddb746b856e3daed601805f97fc06d86415b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html 1e174ead3e132af3cbb3694442523875e478452497bc36bd263f5a58da824c6a 2 @@ -10980 +10980 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html ff3d9eda0c25d3b9cdacd408ac0d28d095b027cbfe474a778d034ff74da3bd9b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html 29b1b51224ed4a0ae23c1e44d47aed21dbb51e00912466b28fe42b6269e9d450 2 @@ -10983 +10983 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFacePointEvaluation.html ba8d8b1d952efb57dd8b27a3dfd93046cbdc274674468d83b6b885d2a549a464 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFacePointEvaluation.html b5137e2bf9f8d7e9beb62bce69941a6ae3d8f62dc80e667dc8a9d808d948f93e 2 @@ -10986 +10986 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html 90c1642f1412d9ab263d191d7d3ef3eb4ca1ea63e01865ffb46e0819fb8598ec 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html 1469ccd0051a5c4b81dd81296ab67465ddcbc8071382d80b0ed6da11205a0075 2 @@ -10988 +10988 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html 80fc95c39ade49a0f1621cc071bfaf888b439981332d02f99a79af440f75bd96 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html c2df5b553679dd30d50502775d321f5088c85fb3c5184e4eb798651f606158a5 2 @@ -10992 +10992 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html 6934676a658e748febb8284716bd96dff5c1b1b6b1d52dd5ae28b4cecea6469a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html 7138f14a3c64cad899c0f87b355e0ff1ea39fe2c205f8d89975cd58426c25850 2 @@ -10998 +10998 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html 838c075c6bbb4a2e02568c063d6a644caabd298aaf8c757d43647fdc6f76889e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html 3649727255a2e16eabf97644a5a11f1e1465bb8c8f47261b067e1819c91f87e2 2 @@ -11001 +11001 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html b37193c1721afc48250b03707f67a5cdd7f3589c605ced89a2895145cd3d65b9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html 78bf52bcdaf51daa35bca3994e250515193dec9c2540bfc5dbe30f3e9cb0aefb 2 @@ -11004 +11004 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEPointEvaluation.html ca857b6a94229502417a351b9d6ac20f1d3ba8eeaa2319bebab1b47a6a618b36 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEPointEvaluation.html 6e5c11d281252d829ab29f3ae569ac60b413c9abd1f4ddcc5f47250c98a4f66a 2 @@ -11006 +11006 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEPointEvaluationBase.html bfbebe4341cf28e72434413d07af0547d4ca8aadb2fb6f924b9878ed89c57f57 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEPointEvaluationBase.html 3c18f233ef46c5139d6578f74accababbb839deab1b72e91a762323169fb2077 2 @@ -11017 +11017 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html 2d7331780ccbb39d7fd1dccbd240db54f82a7434f08ea76cb752551915eaad77 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html dba553497f90567dd46795e3c7656b8872876bdc3d31a7ed6ff3b51d0dcabae0 2 @@ -11020 +11020 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html a6049bd7055640a8c0118d8b8c6012f25b6c197fc8b82c091ec48973ac2d759c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html 723f88e3716c78cf500483de42e68a047f4a456f6adf1a533f780c9a76a5323b 2 @@ -11023 +11023 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html a6254f9a845616956aad8c6dcb6bccd49b87c0859d250615b582cb568bd72416 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html a172da5cee67013145c3188b618e7d113841f9b26b61b1840362f8c83da2e5f5 2 @@ -11026 +11026 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html e830a80b987b1e529f7cc893843ba0c1a5c8065a9a2a260042ba47bf2783dfa9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html 8f7b5fbc0555d37e05a585c34e3cbd9e24e0fa2b748e79d0ccc49a42961f4b91 2 @@ -11038 +11038 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html 6ab7818b80794b17a10ace388c7f51276e3b13e4aca45d0a4f86dc5f1bba5b7c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html f5c33fe7728b055ca300e268a776b0be1d18f31474795f490deca08f364e06aa 2 @@ -11040 +11040 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html 5034b9841e8e077fcb29b2ac6ac3ab1158c1600cc3aab908b33b1d8f4eac50d1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html 0466aff0b6e5960b03bc601bdec7d9c0f99f271a8c959186c0b09de02dc0b187 2 @@ -11045 +11045 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1RenumberedView.html a19531d421542b5a0a467ef3b6babd57c30893a91a4365322c7c240385cbd0b4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1RenumberedView.html 5044d325a7dded6bd4c4c579372ecb6acdc88b9e94adea9d1cef42c7064558c3 2 @@ -11047 +11047 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 6f4e0b03d4469c1ab8a5fd3064619ad78293212944585231e4de70f615dfc7bc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 52b95f6247ca6be8b4bbe4c7362694bdc83a15439a9c51269baac47ea1c8ebd0 2 @@ -11050 +11050 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html 537c79287493a6e35ba83f0ae162aae53c6bc1b54e985db752886b831919581d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html eb51e01a271041d0fe6c98bfff996f900f32285544091e513d223685d26bdc36 2 @@ -11053 +11053 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html 75be071d26b98c9a498fbac307ad6c3087aaba49c046bdfec1d48239a3b42ba8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html b22638ba0264900f6ea135eafaba80e546d74d612600907bb1e94c14f71a473b 2 @@ -11055 +11055 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html e41637acefdd02b9ddab18c9c771b373cbcf897a55f794b023647587bc9868c3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html 7cc8a2b1276a37e589d8eb35bbf8c359f26021f0627193bd30774cdd4805de62 2 @@ -11058 +11058 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html 6c82f7cd411c649c7cf1eaa16cf2429034ef1f212e905ced5435be173bb4bc53 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html 435479a96efaf371d439873608f9480d64893afeab3f610712fd1027ee784f6d 2 @@ -11064 +11064 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html 0cdad5b2cf9204a07fbe8ead6ff8e0a37d99915ac309dc8457973b7464a510ce 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html 4c1116fd134b1c7b148551abc6d9940085c5361c5e54f50ba01afa5d0c595cb5 2 @@ -11067 +11067 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html 8135c4f276aebb2643a971ee358f681aec3b5e280e4d8883d7227ff5716dc332 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html ab7f2f89737cf7330cecaa7efea0c7ba1028bf34aa83c24f3aebb475af983ce0 2 @@ -11070 +11070 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html 5320b3453a9b8b885ac7414b78fe0c40103e680d9832660c66a1de3edc39cb5d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html 56488d159eb45ce7a0971bd43c207be47db590d64c37ca0bb5ee9eb10254a3b7 2 @@ -11073 +11073 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html 2fa9257973dcfde833b2961bcdccdd1e9644434c8e0f5cedd850cb15df8a3e7e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html 58438143d149ae7c0d67e18ad4bb241c6a6d8ce2414cb67def2da53be29c99c5 2 @@ -11076 +11076 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html 30d1a9b2973e33dfb2aa87121e142964a27c64accd0398adf799ecf31ea6bcfd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html 06286e060621b2ba254ce737fde210d4106cef415cd876f5acf2bf4bffcc992c 2 @@ -11079 +11079 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html 1bda0b381ca1635a9fc893f83dbf205ee31825375912cfa7e518946bfa1fab82 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html 3a97e3dbce6ec0ae61efc2def5ad2737fc967ad921c38f596f366466e40558ce 2 @@ -11081 +11081 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html 523e3a99ea754d485249c0ef6479b5cc76e6f298971a97baca699561315600cc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html 2d5985764952be4ae5faee62b78cd8fd58ee34ff9bb1a3b352dd0da80d8e4bc5 2 @@ -11084 +11084 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html af8647dc84088e32fdcc037521ddc51b8c52df188b9eb8cc324a3d503c61a66e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html 34996e4eb77c721bf5587432a02ecf29040fde808459b1d334e5b6214e2965ad 2 @@ -11088 +11088 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html 42711d7c606faec73a0413d3c2a0b4be987dcbe7b35c76a296be383650ae514f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html 3cb45be67dc480e7e7df4e4c817ea614caa0c07da5cb83aad1693fedc61eeb65 2 @@ -11090 +11090 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html b9be9ff0bde5bd6094a341e3f80421e1afeab6d843f664cfd5347a1a744f67c9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html 
efa4780ba648c889f8544f9fad49c14351a258593b6d617d79172c79d289fb7f 2 @@ -11093 +11093 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html e3387df1539343816e017c4731e34a2d4877a510876771ceff0844ccedff2e70 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html f1990cb43c91982720c4dc07c56d2a125212c2dd5c5d09473c3b183f0c4a62fa 2 @@ -11096 +11096 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html 964eac969c958ad190250a5211218d722cc460a58dca8921b98ee92b2227e87b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html b008463e995de3f4e712dcfa870ca605df5b940e4f2a4d97e2b04de08e584212 2 @@ -11100 +11100 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html 20dd4a6e47656f49e1f405d241b549839c9cb2a9e4d3b0438c8ee39c6f0020de 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html 86f8fb1cdeef346ed11966cb9f1a8006ee1d9a500fea48f32ecaba04005cadd3 2 @@ -11103 +11103 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html 0af5ffb1c160c4ceecff497d8cd7b32b3c61aa1e8575a312b53e05fa6ccf554e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html 3872578fd9b2638196dc7b81d76ef1e8a93727aff4b910b25366b87aa84daa05 2 @@ -11109 +11109 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html b270afc57eb467e40097d6bb3a69dcca525d3dba3c8b73394fc3a9891637f178 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html 8c8422e3260264732bfc71b0ccf9fd9221a6d6f032265f5d11cde772b2b9a3a1 2 @@ -11115 +11115 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html c0fd369cefe35142dcc0ae0d0f46516ca3323633888c4a182b954873432270aa 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html cfcb672786f687bed85278b9f13314fa36ebe7df1c52771b1afa3a1e33b1eb78 2 @@ -11117 +11117 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html 484076057ac71f2fbe9440b0c5494bb21ef4b7c5a22fb2c2f268948e07e1d400 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html eca344795beaafcd79e3a98da0954a5a7774ca3fa1f4edccb15b8db2d87ca2ec 2 @@ -11121 +11121 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html 270d423ee927c1f414e70214459d13a7bcdaba0b186fdb60bf6c942df2537b8f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html 5bb402c8ad8c953771b637c43ef6d159ed4d4e4d6cb81968e0b1ba875c697edd 2 @@ -11123 +11123 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html 507cd06b127a47a6a1a253d6a88c109f79bb3111ded9affd1944c539b64fe617 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html c9149956b9f009fd7f470555d4b68dca2c3a7194ef04d666622ec7c2cd195cae 2 @@ -11127 +11127 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Hermite.html dca77d1b27ab56d28f4298726064a697ce02b5b653755972d3577e36a63a8f31 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Hermite.html 7201f94986fe448d446302b3f46a6a8d8755a80cf6d89e753847d1a20d752b90 2 @@ -11130 +11130 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html 90b91803e70453c45b7eb58b8728034514307fd036bd730352acb73161888423 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html 126b82187c4648363458e44376f976bb6892a536dd21275f463842de06bc0fd3 2 @@ -11132 +11132 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html 
fd0fa67251a7cfaa7a97e166ff61d1fa6ee56777ee862a1f8655597ec5eef5dc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html 5cccef6a87c50af230c230bf4753fba479bf0cf50508edd4e57556c29e49a5f7 2 @@ -11134 +11134 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html c4071a5c50ee0f7b59c0e015fe4353c1cdee590fc55557a44702301f1b0ac7d8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html bacef94d2150876e84981cd71388296813b7d51fb503335d8c1fc16ab2ef8710 2 @@ -11139 +11139 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html 1093efd6d1129e3fab5fb215e8ed8efb53c4becf53e0293081756e4396f9dd71 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html 2f44e708caa9ee2679f3085af01adfb519fd551607b239581c021c04c6af08ee 2 @@ -11142 +11142 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html 9c047c7e3bc69f64cc705505462dd2e7ddd45367b459195288f7f03a4482f109 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html 08605f4b42004e9fccc29bd484eb07a70e9fce3c172a1af0ef1f267b5ffbe659 2 @@ -11145 +11145 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html e993343788fe8d90e17e2d023c7b57147e103ffbf284d5c97ca402853e68eb72 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html 4702b26f0251eef9db53851785eafe573b200e9f6413a2c0cbc16b062b95f65e 2 @@ -11147 +11147 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html 8c7d49f85a9bc01b274e5c07769b9f6db3f735cacf2301db5ff019d0d3aec8d8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html 50c8b343be03c70d4c354dc55567a2bea569fa5f84af6d2f78ed45d5af3ef8e8 2 @@ -11153 +11153 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html 9cd34a66e903f9b42656ec459f06d9efe347db7e882107421c96b1f942b2eceb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html 91ee68d16d23eebebc1e33950549d43be8a27e7e9f05c0a0aba2b177c20de839 2 @@ -11163 +11163 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html 79d1e1daa86dd87a9ae2fa4ec6b4d19f3a57b706b0ead1e9e19e27022bedf33b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html 9ae18e5fede2229dbebbca232b7e9b2fa376b0f9a1e7fe82dac157c2415f5b5f 2 @@ -11166 +11166 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html 4f5862f1000a4f852d6df41dc551071348cdf41579a4a689dabcb2002c2e9b16 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html d5458d972b4d0953694d3daf1e6d7993130e4fa94b816d2eed800a4b27d9ec61 2 @@ -11169 +11169 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html 7f2f95e6a759e9ef66342f5c29874da339f4f19b542ba721ee3d4b722bf7b008 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html db1c85bb2d2fc40ea900dd5e4cfc27eb1b9013da3576684a3d0a7d13b739c5b5 2 @@ -11172 +11172 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html 5fe08b4de924835486af1cc1f7941658eee1bd360bda62e921ac346a42aa36d0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html 50917ed40a6e477180b8bd73803fad550c796739bc3506d923b62bd04dfd5bca 2 @@ -11174 +11174 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html 4383671c0365e1caf514ca22a2c8c8742abbf9ac910ad347728d7668ef888918 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html 081a38a0b599f29e7e3f099b9456481593e32db9f937c971df1fdca64fbdf598 2 @@ -11177 +11177 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html d7b4a17bdc5caec9ca3a63aa2746a9a3c7a702a39a1bb45ce4fee31f1ec24225 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html b332eef8a147a8812672d53aa1ed62715466990d85e02438e35d3a8eab2231e6 2 @@ -11180 +11180 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html bc4e3826d371f250c42a0e55e115cedf8c08dae81e7f8c359e6102d5f4fe1bdf 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html e394a409609a67720e318892b3cf8f82a2f1adb2bfc696c7259446672060695c 2 @@ -11183 +11183 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html 8b4fdd875e31803f664079ba66193ff418931eb40a2ea7e228edcb7195313f11 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html b8565e4910fcb32c678a1d325cd99fbeff6283a62eb73129205e47a35fc724dd 2 @@ -11187 +11187 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html 81f222d8f08978acc0bf4e0a8bffb87b6778fe0d4b75f8c62857ca1e608537b6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html 67d803d424522f6097577a46deea04ecc9e6b5b05a1917ab6fceab8d663a9b11 2 @@ -11190 +11190 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html 8b20d03a6e2e67b3aec2ef7f77530071ebeedf9d5c242711dac9ca2bd244f304 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html c443a1d210392202ae60942f80b15f17d1a418c0f9639e9f607d0da621fc2f88 2 @@ -11193 +11193 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html bffa4fc69cc84381e4dda93cf08f7e38441420fefaff236214dfeec0bd1328f3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html f0f0c8ef8f0a8b66ec24202ab1934dfe4e803b98d8d177fa2114da54bd2dcd1d 2 @@ -11196 +11196 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html 32cfb2e90ac6d8d8f325e6e50149bb6aedfbaafd48980f130854765b09c32201 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html 33c5b4f693c0893264bd6b7cc6c768008e469f1816e4ec9ab7aded450a9bbafc 2 @@ -11198 +11198 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html 7366e5a6ca5caa73dce73e5158689d8e9893750d43330ffa64b85b9d0457dcb9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html 7ebf634aefeb8b7359a8a447ab44c367d08cea3220a42166560dcbbc2156da5b 2 @@ -11202 +11202 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html ad7af6f792de4756c4a86062b1f26304baaafbcc3e924c213debf990fc7aa731 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html 6bfb790e43b0bad06e061ba1c16a52e13eb4150c2cf0ff3854ac95e1b6feaa74 2 @@ -11205 +11205 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html 6e3b0519575528bf2557b2c8d03a057710e352a7077f2b43d9fcd4c0b854cd5c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html f1d0a4bf4ff52a8d626315fa7a5f6733dfc1b0babc5faaf582b68e831f262dd8 2 @@ -11207 +11207 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html 2a10ba75f9c1bf774eb42d1c5a2918566d1a4003310a7c7e18efb142e2c15687 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html 69a119e701bc4693e48bcf04113dd9f53d40cbbce8a74e850c484aa9b2eab39e 2 @@ -11211 +11211 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html 22924b8cad21174754e08b04056b36c245baf826a24c48462fceb44bb0b46b31 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html 70a22fc7fceaaf07c89887822db84d98d0461c156f9c8553684ced890ec17d46 2 @@ -11214 +11214 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html 7b82abe0461a9935503d74de759475aca1a77d4fbbeda6e7b8cb0cae82f4931d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html bf7d4903d367ff759740644d4cba9627a58589641c0e337ca83b5e8f0bc55383 2 @@ -11216 +11216 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html 13471178ba0112e39f15a99a94439c2a63b4eb588da87683bebdec74b2ec849a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html 4648f189ea268015c4ac6b1c91a34b108b86e073c36c45e198645f1346b2c320 2 @@ -11220 +11220 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html d5e6d975c532a03d716a8c5db8e38727051a7332af8fb96b982a729aa6f88b23 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html 5d87104d7eadff05eebc728a998cf23d1c2e7085c1474aa9388325f248b65c75 2 @@ -11223 +11223 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html db15dcc7696289627f59167c4547422eeb4b9ddba2abed8396fad8bd79545b9d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html 091f659f8d9e589565444d122883bd98ea3cf7e8f892868832d0b30307603db4 2 @@ -11226 +11226 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html e5411214727397d6ce7594f4b6abc19cfbe0c64453fb0e4444a4d536506bbd1c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html a6bf4bb2deb4cc304aed3fe6954f46016d37e03bdbbdbb01536fc068608d8714 2 @@ -11238 +11238 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html c25ed6d78a14fffaa976b44d14cad9bcd4ec0a8ea24a55dadebb3a9f35519c48 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html 088cbd2c7c21baa99eed37c53fc75309cdd9372542c816bdf41a72af29cba663 2 @@ -11240 +11240 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElementData.html 0be8b2f59d17b36a9649f43ec1e778b14e316d41ad7ced80249aa11998e94775 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElementData.html 766f92aef6c4b890ca8ebaf6701a736c278d726d6562ead3d4014b7794f07104 2 @@ -11247 +11247 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteSizeHistory.html 4d32cc805dead72d0eb9b6c0a4ea87b3ce808fdd3bc3b4bb04db7173eceecc9b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteSizeHistory.html 8a9346c3cfb21fa7704ebec41f960aadb053fd271fc5a0d5544e6f3e36a51478 2 @@ -11249 +11249 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html d92c1492b34a7460c0d98ef8d9f76647795670b4fcb70fe0d06f89d57b18b4d2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html 87e6941dd1385b15476bfb142a212031004080d7c29a5cfda95917499fbaaa80 2 @@ -11252 +11252 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html 1fb7d3f909a7b1df4b462be27bc71c7199381f9e749a16b84c0c737bee3d6e6b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html 4bfcca2872bb916cd2ca097e9e6ed4fd185d2fe28bf05b6e13c2e8670db5074d 2 @@ -11256 +11256 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html b0110039b7a9088cf59f8b091bc48a8ded2bc5e3eebb471cd0765bb74c9cd3dc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html cd65e1b9f89c4bd9009975905488cf1e814f8fd3797aa98123b709d7560ee511 2 @@ -11258 +11258 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionDerivative.html 
0c0a3d79686d61e4598736a0bba8a56e21d17f8eea25e255a1052607f5397f3b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionDerivative.html 6f736b96880478d6a0cd681eee463335fdd4709000c4100eef9c67a0a9221f7c 2 @@ -11264 +11264 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html 21605e482dead0d42ac53286e1fd4b1338fdd8bd901873f024476ac69845e434 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html bd5d114aeaa4da88303ea0401b24b0154840314a803b1ec7c000b344458f5bc4 2 @@ -11267 +11267 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionParser.html e1744b50e24b0fdc54cee8399d51bddc60be695255a1dd528293d4d1e2b744e9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionParser.html b734c55fbb99a48ece2f91dcfe4af34a1c64ab2731c3e4c9af885e04c46cd6c2 2 @@ -11283 +11283 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html d1fa5cbdf15b380486b24754ae053ac1c1cbd28c9425099ea6386e2caafcce13 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html 17e67d0a11ab98473aff0c42f05646bee97811f0e73ebc50259efced397df7ca 2 @@ -11337 +11337 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html 8d2eaff9b92b9c52a240c64a44f5dad1961cfdcc36eac658d66ef317070478ec 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html 58b3846875f8a97d6098aa30bd7094112be90138c619b77b148e9a6d25426332 2 @@ -11340 +11340 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html b2da9c20b106f9a8514414d4161adb7de86e004d4a78f9de96a12cf83f0bacba 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html fe1d9a7275777e28df481fe5267d3b0e8d1ab77f47eae671219059705d10f550 2 @@ -11349 +11349 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html 364627d2ff1f829857eed410171fa5a28b6bda7caba5cdc4bcde6904a113145d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html bd68cf7cd34a96e808926f78a44886e4ea72a47fc62623ddd76d841bd744c333 2 @@ -11358 +11358 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1ParsedFunction.html 43ce09e4e328f7b15b9ec4584e90ad64febdf0fbeabdb8d94fae7380b71cab3b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1ParsedFunction.html e88c44f066c9298d0385a51969d00c9938ca3b91eb67e21145825925a8ac3e7c 2 @@ -11364 +11364 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html eec04d6fc11abbe9189fbb895a4f241d5aaa3e37c77bd1528b6888ef1ab4749b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html e777a28ba1f419205f43e25a8b51c07650e7c84a8858b09b50551e3d8ac20daf 2 @@ -11370 +11370 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html 7cdaf3755977215c01a027da51f75a87c3d051e2af8d4b1c3773fa83d014c64a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html 7724a4e9fab56c254162f2f596a602d98c1d410c3fca156ffb0cbe1bd30f9c1d 2 @@ -11376 +11376 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1RayleighKotheVortex.html 18691bf34bb6c3a0c6ad6b6b4da2a7c4d42d9aaa0ba7555acebff937d2447ccd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1RayleighKotheVortex.html df9c85820d94c33cb0aef6a90426d5e23b8675c959e93808e3ce1e920b4cf1bc 2 @@ -11379 +11379 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html febcf399dcb4755cc6078c4da19c910a6cc47519722ac76bedf290337d55fbbb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html 94d73831239936f7773d35541e46c5cfd859e54b1af00250dc6c05a8c1bf9a7d 2 @@ -11382 +11382 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html cd771928fd34d06f09d2e5644b2bfd3ecf19ecd727c28bf2ac14c031e310b9ea 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html 1e8da3d7e8c96b040703925b734dbc684b8d831b103b3c221cff01757536ccad 2 @@ -11385 +11385 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html 3811d12c3e0c5a115452fba029573bea67c94ee3c4e62b75af2d39321074e585 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html 0b18b44fe39acf4f693dfb9e782ae180fd4ee3a62827a43e717fecb4eff51541 2 @@ -11388 +11388 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html a53f88cd02be4f261b8812bcbd8b4aa1f87fe6b7dd5d2e6a9b6f5c1dcb9fe872 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html a8d84d71f4131786ef9bed1280009264b24f4bd6da8fb53226a3ea6d56fe2d7d 2 @@ -11391 +11391 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html 077257f6fa7a3abf248974b9bd05c5864c1ffe31ffed9da6094c9023bf24678a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html c108f29ceb9cf002d2661ec6946215403d6a0dd815e4203ef364a465f9fcab48 2 @@ -11409 +11409 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html 91e31d2ec7f010cf78e1f2ca6dfbf5393c5ddd49ca205b4d242af46219d84bb5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html 912fd414312618a68c4cf21bc238256666053053b58de45d91fea1346985dbac 2 @@ -11446 +11446 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html 1b092d16abebcad3fb1cbadb10d7cb1bf50f75133ac5313761f330d691a96e42 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html 13d3b32b75b7b79e4f35ebd9015f1078e974c8a5340628ef410cd15ff4362f9f 2 @@ -11484 +11484 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html b69799fd5e8fde32e803c06d56737f4be87e0cd52d7a5f207d60cd3fe088e911 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html 0ef73caaae1895cb18f42cbd7b670d024bb05b0834aa5214728a7a942e2b6b62 2 @@ -11487 +11487 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html 7132bdf0a4e71056b40a27cf40a15fa08e7e667a8c8033849ce160ee4c86c0c3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html 1e8a811091b3d9142bbd45697ea2882fff5697b16925da60b76ba2a10477466f 2 @@ -11489 +11489 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html e393f6b2aedf4ce5553b1f9125bac785bf33a4097bc07dd4f1f64b8257462b27 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html 4b3194a468a5b4e0986069afe11895a03c7981c64b114beac41fd8042e9d145d 2 @@ -11492 +11492 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html 865368620af049c434bb1ad9f54fb633afa194e4c69ed4f5d5eb4a11db0eaa5a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html 6eca1ce65da503fd1e71ab6c5c461e833bdb5806ac057937e09523200cd0b62d 2 @@ -11505 +11505 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html 10234c5d48ecc62ff84da0ebe52eed509b12c83003a593e654d03b526f062022 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html 4e9a88383d23b8727c60dab3447e9953359da3a49325e95efbdaa2980c4c86c9 2 @@ -11556 +11556 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html 343a2b427cc164da23bb39c0926ef9ed5768abc83f5391b14da81989d0aaa30e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html 56558bafdada3e640c4ad2d3eaa6aa99df7c86e899e93af43df11a178f49505b 2 @@ -11563 +11563 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html 8f01bc3b76624c89c066323ea7a35a289fc22e33c25b73452672595c9d1d8b90 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html 5bdf22b0f3206e81716182adfe81fa126714da790612f291f8dc06b9718caa50 2 @@ -11568 +11568 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1EpetraWrappers_1_1Vector.html a3db45efee74e2e1cd1bb8652dcc3c390530f5b5164364d5249c3b83f73fdd5b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1EpetraWrappers_1_1Vector.html 5094da541d08c3f7ff449fde20673cd558fa2be2cac1b66b4e8d67a9280ce48d 2 @@ -11573 +11573 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html 8df9908a026b1e397e389974c781fbf128175cd0b92d17e48100f2c017b61a3e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html ad7a24500b8604f30ca434c1f34d726b757009749e32342105c90fd48d7d2255 2 @@ -11578 +11578 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockSparseMatrix.html c4b78a0b74cd78395ffc8c6063e4087d422bde986eecc2d414227ddc8f11496b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockSparseMatrix.html 1b524d10fabe13b5d321b5a75b75cb4e92505f407ad1b6d3584f97542ffcfd70 2 @@ -11581 +11581 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockVector.html 2cc94f35d7948de7b8a21d81a278e81a6a8a339ad01b8852b7bef0dc53b2467b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockVector.html d56ced81cbc888342bc0640b2f5bc1b1a66e88ff1e22cb39ed9cae00f392cf94 2 @@ -11599 +11599 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparseMatrix.html 16b99dbd6bb61aba30d4756cb45381b57a35938db03e1c413fb4f519c1f249b2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparseMatrix.html 6732ede551a98a17410f59dd440ff086c1915b3b180a865d1abb0faf9e97aae2 2 @@ -11618 +11618 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparsityPattern.html 9720296c068aa49b0850e13a4b0f3e330cbbe31195601339839a96f196ae3b9e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparsityPattern.html 96ff39c3f51ace91e50c9b5effd1bfadad039bd0972f56277f808aaa151d236a 2 @@ -11626 +11626 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1Vector.html 7e27d4ab5e99f89b257a8c299d531b71e98119424089d4700c30f6e5c0b20952 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1Vector.html 5f9baa3a3344ddfeca1242148e46f76e37ab2bb6f6a558aa28ef13f035ce6a1d 2 @@ -11632 +11632 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 973dc13ec24564f2baf1a194595027a38ca82084ec87b96aaae66b990c7506f4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 95cf3288340b6039b3a567fd87ae194df2aa064bfe2be16bb4383e7ab4e43de8 2 @@ -11635 +11635 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html 31760487a40e333e1dc953bd9e139cd7435ca616c86bfb44e38a85b39be4b277 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html 4a244ace8ff209301fd04534b755b061e5edd446b5cbe48969236870870d8059 2 @@ -11641 +11641 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html b33156dba47a7cc2f21db207678b92c47bc37e3016de90075b418bc938da0bc3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html b4329777a41b5d915adaa720bcbe85365f4e380d4e4421e16bfd1c1a06dc9042 2 @@ -11749 +11749 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html 1bdecf91ceac0b353f32d68c5c8f4d5dc3e325aaeb3b9506f9dfa81991ce6da2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html 785f56782ca3fc900705f5172cecc96464cb0186b5442b7399d3b68e75e4c6ec 2 @@ -11752 +11752 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html 0253dba6356d50dac2ca551abe2d1fa678d119f95d69213bc53a78185f7e0de4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html 64953b38ef0555cba6c2bafd42ced9aef2bfec5102fb5f21c4aa2ff0921cd163 2 @@ -11754 +11754 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingC1.html 5cb3ecdbdeba5fcb5d33e973ca97ffde68cf063e7bb91aadb9bad65f061ddb0f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingC1.html 432bac09a8497c39df9446a3634841a7a4c7bd73b0bfda95d5c789347991baff 2 @@ -11757 +11757 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingCartesian.html 7fc131c8e2153414c740df688cf835bb19c087bc005166ab621f5a94fc575e1a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingCartesian.html 3db5a1aa4b2093544bfe6cb8cf2846cef80b9d6c8e5abebdc0b33109c44ec6e5 2 @@ -11763 +11763 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE.html 3283e0fe3a80ab6bed2d8e6276409ad42a5fd4c49f12dbfdec6b302a3f620a51 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE.html 300e8c0d04cba913e8977eef9a4680026d2d89ba7fb61a49607c6f3b948c2f88 2 @@ -11765 +11765 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField.html 0471955bd9b3988d8d903df2cec3f8979af567fa38222bc27b06e4dfff7bc192 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField.html f3c424137b2e2b8ac8fd3dbd65daf190d5ff56338a4f198d8f96dabbda0dcb43 2 @@ -11767 +11767 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField_1_1InternalData.html 9b7d90b696f3ddedc89fd851047383e1d18433ce973223d8ae7378467b9fd3a3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField_1_1InternalData.html 6c762d02918d80a25ed1ac321039e4c54d93ba77dfced4893af4a40ee85a9125 2 @@ -11771 +11771 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE_1_1InternalData.html 05018c58a7e9f5ab6f36a8f4f712e9188192bf69651fa5e5cc7d4231ac58024f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE_1_1InternalData.html 49fe2dbddd300b7c46753412ee3b604f35146e20472db660b5e236f2d3d48b3b 2 @@ -11775 +11775 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html 
6a1f521db0a42c4f3b04b8f6ebeea9a03ea7b909a904208baa9a8aa18ae74725 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html 7abeaf4fc3c53f9f1ab242b12970aecb9aa8cce7a0febc3f31cea27f83c2dede 2 @@ -11777 +11777 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold_1_1InternalData.html 09ef2b07ca560afe1e37ec9a2ee8bc62c9f3097aae757c5211de08933e368fc6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold_1_1InternalData.html 4875ba341f76fdc8c1fe6fae18e4a732f5d7d9d2d808ef66505ef0b95181e980 2 @@ -11782 +11782 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html 5f91c4d8a4ea64621567ce25d7f50056279b098ce4c6325a7d1135f8dd66173e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html 6112405cb1b3f8205915f2b1f5254302e75da40f4047d67272540276f44eb038 2 @@ -11784 +11784 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html 1da195f3b408507cf819bd48445ee093a0c165047a29ae5fc3c8e202425469f9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html 044d4d49ce63f208b840be595a2aa55bef8c597b51d29bdd7914afe3d910f490 2 @@ -11786 +11786 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html 4103ea297eb8b882d1792e398fa061b62591959714720c26243210d04d136083 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html ee8de88065e74df38459bc8d85f4810ae22a37a484c381e5b20577e962da0e08 2 @@ -11790 +11790 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html 4d4b824a1f81295baa86b6c9b2657f942f00503331d35c92bcdc2acb6d70d0ff 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html 7bc6e023dba900ffc31fbca8379d936c7d85824f7e94a0b2ec1838abfcfec9d3 2 @@ -11793 +11793 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html d3ced655a78268a7d6aa5d0c21af851e973534a2d80b910866e6f80da95c9e6c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html 4850f6122b24b9802b430c0c937a836559b59dee3e4473af471b08bf4c8d7dfd 2 @@ -11799 +11799 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html a8afbd5e813f19a88fc09cf33f50265b4ea8042b5f67192610d7cbe5b0d41e87 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html b7a83a2c03f7df8bf24db2e5c98675f49cd9ce73167cff5b05d80f389fcdaba0 2 @@ -11869 +11869 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMeshWorker_1_1Assembler_1_1ResidualLocalBlocksToGlobalBlocks.html 647fcc191d7e82eae8e57f0d9a49330ae840a739e04a2351ae197a0634f8cc40 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMeshWorker_1_1Assembler_1_1ResidualLocalBlocksToGlobalBlocks.html 07b8bd2f5ba66cf0c5090c14c2ed5de5974d5eb9b23d142aee58f657a614e866 2 @@ -11879 +11879 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMeshWorker_1_1DoFInfoBox.html c3c95611c965f329a8390b62d2a5bb132d46e09bb2086b56f2ece2432baf568c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMeshWorker_1_1DoFInfoBox.html 6c14af3b9e21911b11ca13e940d83a97cf3b8ac16d16d19274d170789fcb9fda 2 @@ -11913 +11913 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classMultipleParameterLoop.html 42dd2dd871f3d9098dad12067154745079a94015f12e43468739f364aa00b8f3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classMultipleParameterLoop.html 8429a0f645ee4bf178cc5b5ef57868ac845e59fd5a192501bac587cf021fb663 2 @@ -11924 +11924 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html 
2437811b9ddf2996a6e380a12f3b94e8d1276f38fe06bd3a3e8aa8347877bfc4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html ca9e5e04fc39c363ebd17b4ef9b49f0618a37b7e363f9c05ae3436152893503c 2 @@ -11927 +11927 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html dc5e4302502eea308d5ad862b0f9a33fa75e341ca1697aecb3180ce06d7e1dc9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html 3af838b9bc8beeee88f8a984a1bbed52e3000214115fab5d5fcc96ac50e9e4cf 2 @@ -11930 +11930 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html 24d7b410c745a5d1225823eaa03eb938a1fb72f43b85f3b1ee74885701c600d2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html 0e782a941b360d19e8363f4933be1f638081a67953c4b6fda779ee3a22a0c476 2 @@ -11933 +11933 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html d7c4b3bb799bea90168ab0eb0a56e25f68f1c7871429c4718bf0c58ba9ca56c6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html a422fe2155264f7cef76826b617213d15c36199b72a463b51659160b17a7881c 2 @@ -11935 +11935 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html 690caa64ecc514e677cc67bf3796ca66c649bb6f8e95e288f907b44ad006ef5a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html c22744a5fa612aad74286c0c99536400d37978a6950c1995f84cf54539e9a34f 2 @@ -11937 +11937 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html 0ab253f4de15c51a372712d7d5263cf6d96f0226bc4cd08e0b57ad9c503b74bc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html af0b904cea9fea80f13a8f7444a2d8871ddc4689d6a6270b7a522b3937046a3a 2 @@ -11939 +11939 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html 2db810ccae0b5f42b69f894cd45e0d16825aa301ef6ed1ad7a14c1b20498e9ce 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html 3e3913b82f39ed5146b438dfbdc5a7a0444d1a69a16fff889631bccc839ff56d 2 @@ -11942 +11942 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html 05f47cac4aadabbcfe91c570eb1c63d43ca89901d9f203a5f8e9d1d823cac10e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html 026ed758c234c2e8f98455fb44cf35960a08f7cafb088201bccfa3c328d0e5c1 2 @@ -11951 +11951 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html c02103d4e65a1209367d199c4e343eb7157236defc33c4edc537ee2bd5bbf36d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html 9caab3ca91bf1d437a32505abf7ffe85e937d53ff2992b88ee8ae4bc590de4a2 2 @@ -11959 +11959 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html 5f2e0b9ccafe22d40c7ad2c3049b52f85fce5f0783411986877e89f991487150 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html fecc0e73b5f21598aeb1e97fc24b2df273d4e60e8d7c14c9067b6f82a1d47e24 2 @@ -11974 +11974 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 756d32e3ce5a5146e1e3f6459077a604a1369c40a8003bfbdf7edafe7ae4f1df 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 41c49be8dc97ce857225fce12af2b583289626109fd83da392ee94c6ccb9241c 2
@@ -11979 +11979 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html f30c4a4e92a0dddbba8ca439a023d87625659d42290078d130de44df4ab439a6 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html b09b637303ed51480d73d40e657ba42c4ee92830dec9e64ee637d380849e666e 2
@@ -11983 +11983 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html 5c3a4a3f5124b77035e71212737fdb1e2080a0624589a5a8c2e83a4810ebe4f2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html 4bc7858517f9a4fa4ef14f699830f0120543aa78d0a4131fa3993e3f9fbaf1bf 2
@@ -11985 +11985 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html 75715ca494963b3895c5c937e62e804c20ae6b4e7a06fd175b21853bbcb349ec 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html c5c7f39eb31ab0a852f2650b223a69951e79f4b58855aa808131802b0b419a32 2
@@ -11987 +11987 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html f772dd97f715d54e7bbf0e966bbd2269d66058559b3f887fd8880916fa72d7b1 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html 64e1adcfee1a4c855c67da28eec360578243facd8969683c1268e73cf5b3e6a1 2
@@ -11989 +11989 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html 6227722954b46c6d506583ad4a2b377308db84867061881882e8e5f0711907c9 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html 549e089211e3b96656923c47ffeb8230d5e0fda0512318440232a2bb85662254 2
@@ -11991 +11991 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html 7c89a7ab27865c00200d0856d5ea16710510e71e69afce3228ade9e7f234b9a6 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html 9c24c9d7b54a38b6fc2f38a25697cd33da19adeec4e5a7abb82aebc54b50e8e5 2
@@ -11993 +11993 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 76314f3d43a7466492e7f7951cfbdb590d2deb07a42777a1d64553e43214ee68 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 13ad55f1c138e966938629d3ae48463fb6248e351e361634eb1eda97214c324c 2
@@ -11996 +11996 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html 8f19aad605bd59bb5bc3b4ef3a51eef5c2d3cdeae4b36814542743ab1b7d3f10 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html d86ac474020cab189e8feb16ab3280ee1fb7e01ee75dd24a8e62af0e176e4217 2
@@ -11999 +11999 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html 575d6c17b62371ac838c9cfed288b219a3fde2f274d50e91d6e226beabe995e1 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html f68db8357461703abf7a2793a7912d9bb360bc54fba0633bc736bd74f97da5b6 2
@@ -12002 +12002 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html d407d86795e20ca0d9d1fd5b1deb4a0109b0ba78cb38b9c87200c99de05f2488 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html f5bfa2294f631c612744b5c86dc085d8911953bb8c2df44f777112199da17a10 2
@@ -12005 +12005 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 3b82642bd9e0432e2119ff2c748874d263a2f49ceacedd04638e7d31d30b80ff 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 4b8763a9bf31896d1d413fedcd49ba3416e1645af2d60bf7380de078346ac589 2
@@ -12008 +12008 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html c854bc4af47920691d45ad0ed09c6e6a63b0d3ff917942310038652944da623b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html eea9d5efc534811b85ea60e252eb6fc3b2b090f16661bf335a5198d916dc6c39 2
@@ -12011 +12011 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html 76c4d09f950ad33b62d36f65133c12e9c49d8255f460ca28336ef2cd94c8fa79 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html 2938d542ab670b6ac0eacfbe45dc5fa9b35babdc164387d15478c04426904a68 2
@@ -12014 +12014 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html d33c24554939f44de51cb4200c4dc1ef0657d17948f7a661eb227fdaf9f5d50a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html 9d66c13d2e153f24c90c3ca16131376a03153134a47b218c8687309614a918fb 2
@@ -12017 +12017 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html 00f139d7dbc418f9849b475f4bcbef399b739a36a6a0aa548306e6eefb50c865 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html d148f023cf8b73b63065fad7e7bf049566108a8ed64495400012fbda17f34941 2
@@ -12020 +12020 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html 0632fdf3e61b5491fc8c49a210eb724d150226afab822c9ca2f0a1be8d4b6213 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html 30b28dbed599236a63f8491cbd826b61cb9828e6ca592d43004d0942e86f6b72 2
@@ -12023 +12023 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html d3fbd0a2ef768f6fe5b4b4c83f7d12462f5e8535cfcb1cb15905362ffd6c8f61 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html cc7df25ed5e4e7976036f021f873a9c99c72d42a90956ca889bec2e1494196b0 2
@@ -12026 +12026 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html f97407e6f9b568af81aa793dcb9d71e1c25bb8af4184af7def2d6fb8b8e85e16 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html 0fec87aa0d6fcc1655e728178ed2471548ee4998991fb20bb20497bfc6935490 2
@@ -12029 +12029 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 09d768c2764f05d1b477db1a6f841932280c4f0647ba4293b99254b0bdbc91a4 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 0256f2f859c6f4ea7ff96823ebb19a45cd61b502bea453741adef4a0c0087bf3 2
@@ -12032 +12032 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 89a746cc95a833bd2c983a3baada4fc279fd6da1ff8a5d805b009f96c94ed58f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 3d422c9731af401cff0bbc29e75f2f03324a7de7b276a229fec80bd55bd43538 2
@@ -12039 +12039 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html 126264cd52c60b70b58ad0ec32301f9023ae40e20392b8aaaad6e0a1d29b40a8 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html 65f0a8754d55f43014f80a5046a6211551a9cdd148546b7ba88006384ffecb8c 2
@@ -12127 +12127 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html 35a3e7502fff3bce8385115c7240821b4638590c95b3b91569786469945b0aee 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html 53a8906df49c0ccd0fb5d2babc9eb5c36d77de0da954b19e4c13b19fb8f2fc7c 2
@@ -12130 +12130 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html d1f7fb0c425c0e584dc05ea3b4897c305440a3d1704812f9bc0c85fea75dadf8 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html b3f051cce09e5ef6a8c60a39bb412015aa2f0a9a4fc4b27828e4de88b3c2f532 2
@@ -12134 +12134 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html 19b7f7634aa518729f389b28dfc4c71fb99720776004d0ebba131de5ba773521 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html 703b33d3c342a63f54ff6ea525a3bd722a5a65ebe05177282c4eb791f10c9dbe 2
@@ -12145 +12145 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classParameterHandler.html f47c5e74fb413388a43a8e187ce3fd6dd062d06171174e052b8f47ddbac2d97b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classParameterHandler.html 2bb26df589af9d708230f71e67c4382ea02730a27c913c473940ebed6375aa5a 2
@@ -12158 +12158 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html b3e76026cc4b3a947d918c39c1b2d3b3643848f645ef11c4612234cf15abdafa 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html ba5902b5bc3ba3dfb4468010192c9a24e533eb9497adceac8bf295922d4efb57 2
@@ -12163 +12163 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html b774d2f2c1532983b028f214fc93da8fc44bd8f467dd02a0b7d98ab766e2d8cd 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html 898287b2a4fb4545d6c1a19c13789b5507f456a0d87ad984965fab71d1929128 2
@@ -12166 +12166 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPathSearch.html 76334677afd7068174f71845651e83547471e14c2c0e8115e2296a2dcd42ef20 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPathSearch.html b29b1d3229c18949d45b0b0c00abdce5a75879f167d51cd7736614c741f6f2c1 2
@@ -12204 +12204 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html 0506acc9c0fc3c83ec4a192bb80e94dfdd3cbc811795d2361dbfe6dccc2dc0ce 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html cfbe3679c0e40da5ee7b0a9236ca019e8cec2801a07e805922bda99dba08176c 2
@@ -12209 +12209 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html d21bccd75261a447096dc56bff1644e5217a81ecc8e51a367e25ff7486bb3c52 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html 375546fb4db7ef62c807fc3c77cff176812963fa20e0237f4209c09348bc6f05 2
@@ -12214 +12214 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html df60c8ec448c6f062afb13ab98c7bbfb07e086d98da7f606b9d2859a5560c0fb 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html 3b42abfed070616a702d39beae4d0ea5f08eb2635329080d8f334c9547cf05ea 2
@@ -12229 +12229 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html 32cc1f234d6f89df77e46870f73874ad134479a00c84781a495f7ae31e4798b7 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html 2959e55ccf45ee8ad36d45db73f3140832c4f2ab239f2039af7a416d9e82aa30 2
@@ -12232 +12232 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html f598e66815ba422ab4d6559e5b4f57fad8a29fe4b49bfe67aacc1d801a20eaf2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html 2a6acc6db5216ad0af9fc2e8793f31e5a0bb69536c26302a6cafd2aa470c982e 2
@@ -12241 +12241 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsRT__Bubbles.html 17eee86d1012c8a52748577714de89bff1d9a4edc1ee5c4982438288f9f10579 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsRT__Bubbles.html d9914009764afcd67031b4645e62e1fb8a61017e27663cc31c147df7338404fc 2
@@ -12250 +12250 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html 8b64235e91e9593c3acd2ceaf43d44ccfe281df6b59a9a6e0250f24e066160b1 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html ad1fac6dc915b8324c90916058db7f09eb5b864698f6efac039fa57718e11e85 2
@@ -12253 +12253 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 9767e175cbab624128a2b89a418fe4cd35f25fc03fb99833bf8eaca8ffe0d82f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 39c3fbdc10f64ccd604456c75d4c1e9445e6b57f8dc2088c1afe6985c306a432 2
@@ -12256 +12256 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html 0261bd8bd8187b38954179bb64ff45ce326fade2bbe3c0efd315695fcbf1c61c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html 71a9cc5444d0e83b0c1135646a277c411d2592d747dffd8a5d8747ae569561e5 2
@@ -12259 +12259 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html 68f166e2c9d56c518ab33fa04ff964808116aeec4bf4395b8a760520f6de7af2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html c767f833b117bb9078d0519c01a41efa3dfa9d4c0a6256faf08bcbc9ad6c53e1 2
@@ -12262 +12262 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html ef48b2974c050111696de9498a9f441cdd6b689f9ecec62605b9dd2d101f4448 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html bb9af5d182f4fce68a7ed10fbc510a400fcce19f423d435b2546d44437e43309 2
@@ -12265 +12265 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html 5eb2245d786bf907eb9793f7c40918c4aff1ec77c7872724e76095b8f0367e6c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html 16db5ec339fec78190db8dcc0c8eba299cc6425f920955d0f55c55c3f22e055f 2
@@ -12268 +12268 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html 42bd6d8b4989e648ac848606c29e5cc94bb416a7fd8e5e3b825f79f31a3c8c49 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html f6876d9ca094dfe040946ff131f204e22a1ff0915253ea3b801eeadaaa1f553d 2
@@ -12274 +12274 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html dc03e74cc846600c6ec9310c086fc80d8bef4df3b97f311cc511005b4133c01f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html fddac14e999a9b91eafbd23a448b891c89fd6dac74ebf5a6b04616dbab7d7497 2
@@ -12277 +12277 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html 28390e04bfc3ccb3c27f11198206949d57d9e1f29a178ef8815deb1353db94c1 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html 4ac5f2b07ffe485ac35301bc0dc96456b80fa9260fbb2efb9af9254db78336ab 2
@@ -12326 +12326 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRelaxation.html 72e773caca2342253d82e18565c1ce15b219e7a511264a130607c29abd023f97 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRelaxation.html 952eb23d866c4ea110e32e4e1f2e76e2ee0e93ddb7e216cc202fd2ba3ec41aa2 2
@@ -12355 +12355 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classQDuffy.html 420ea0304d95cd329fee65f1d5d5821126ce73b26db89846b6a458d3bba51066 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classQDuffy.html 247b4ca6df0e2f798002cebacaba97b86925d7aaa3a7de36d71302675727616a 2
@@ -12360 +12360 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html 175db470efb66e6ee80bbdd9cd38a2950b1e32c8ad901b5048a80d6c7d5cce07 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html 4a0150bb369ef73cca03fb4461b786fa180aea3fccc1fe77ee4d2a9186438971 2
@@ -12363 +12363 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html f36c44fa64e9ecadf688b2d2a3ef181b3299b75e79e424c7b5fa52a304680394 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html c70f1c58178998d88ddef92efd0656844f9c3c150491ce2f019b45297d4eed39 2
@@ -12365 +12365 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html 436f19426e9c7ebaa61a02a24c602f41f1efc3835d370eeaab3e0fd75a3e0ab2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html e270feec11729bdb756ee0bb372187308b7685e9ebf9730c927b55db8fa2db88 2
@@ -12369 +12369 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html 94314255e7704e0d2bf4d34e3902ce9d2b6816650ad6e8bfaadb58d6b5cb87f3 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html 4004f76d607aeb07799af8d21125ea8ab28493c884a007714e99bdc5c821ec94 2
@@ -12371 +12371 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html f706a907dfd948217851631877197d37a7e64b6e4e7a12b1f2c6dcf55ce962e9 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html db2690afb73b28327a23f9c6e95911a8e3242d5db8c045ac9ee4cb8cb7d2bfb5 2
@@ -12375 +12375 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html b396c896cfd5b423b456a297a7a3b8536ef2d39385b9715d7d9a101c2bfd2be7 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html 66554d01105ad93a27392c584251e05e9667352e97a2024e3523548af23d7018 2
@@ -12383 +12383 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html bdfe039f4482b20945528ff9b150ffecbf5d7a0f35f06431d4a6964e0fa01b01 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html 7f276c02168c328141e421d22ff0a97747f1aa140c8bccda740ba576473e1447 2
@@ -12387 +12387 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussSimplex.html f278180f6928238b7908b2bd037ac17acd433b8912bf8fd76721d24f6a689871 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussSimplex.html 261a37d0958da68cee420750e2d74b379be5d52ad856a70d1e9ed004e285545b 2
@@ -12410 +12410 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html 9f1040304a94ac8f3f368d0e868fe15568adf2648ee398ab153c06c4ec02eb95 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html 640a4ff21eef0b642fce6474f53709589c8d82c370f6f9c0efa582687940c253 2
@@ -12413 +12413 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classQSimplex.html a861285a4f05f7e873ad6c888f0e71c4ddcb00b9c05bb4e29334092410d4792f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classQSimplex.html a1981566667cf2487e8b4a66df85e00f539fa59555288a3c9ba8f95a7d50196a 2
@@ -12425 +12425 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html 4b8b8fb8d484654b342c6a86c3b4cc9a602ff1db71239a4fd73c91dce00ba9db 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html 8b8a88cd72b17cb3cbb082bd26e24a7a9d3370ebaff18d5f0ac209b7598bde9d 2
@@ -12431 +12431 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classQTrianglePolar.html 59ab4c4d0e026d252e23d8a36c231c320652f52688974bd642c378bfa799954e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classQTrianglePolar.html 7d09d47c7b35d44cdefe8c788d8c7f1e0e40749d6d1facefbe55a9ddcf6d1f94 2
@@ -12437 +12437 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html 61c3986009b376338606e3c72c8253b87d4819910185ad725eff03aa49c5a87e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html 1ceec65f3b23938cf5aa396b6281d8d91468b398b662b8de807463e79384c5d7 2
@@ -12440 +12440 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classQuadrature.html 28e54cfdccdf494035c4a575bb7625db61264560a98e70493434b122df87332f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classQuadrature.html 8fcdc912f3751cac294071de8f898c2eaffd05be8a6abf4b2ef67ce3f0078cb0 2
@@ -12456 +12456 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classReferenceCell.html db0210c320b9052b4c7509c3811e62b3db056647f6822ce64c3e04b377f40090 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classReferenceCell.html 454b08ae6a0324c925ecc18db693c46ae3a0783d6d9f6b3bd3bcf147746731fa 2
@@ -12491 +12491 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classRol_1_1VectorAdaptor.html 8863d122520bcdde707293f87b037fde530f57bf7877851fca721b80b61a9161 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classRol_1_1VectorAdaptor.html 6a2462450b707b3c49956614e65bfe1d8f7e32044a74cdf3f6067cfccf378f1e 2
@@ -12494 +12494 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverArnoldi.html f6ea08e6ebec0d7b89edeeb3febf7eb7472ead474ba5a7c98b51e10e226564e7 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverArnoldi.html 5cb1fe2be42b315f90b601ac398d43789726de44a9fa8af6b3c2ac8c3ee802a1 2
@@ -12497 +12497 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverBase.html 4f82df7588126e9f0753d92d2f8a402ff63d3deaff297b1dd838b96e8ff1a8d4 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverBase.html 0707e0feeaaa159d95d66e8db676d66da9801f1e0910cfbed379a1b6a7fbb4bf 2
@@ -12500 +12500 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverGeneralizedDavidson.html e090211010ab449eaff230d9006fd17f4207d0d0d45fa556cb17c9541fdd8afd 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverGeneralizedDavidson.html 4063336b61c2b1e70d77aac9d9c613ab0418ac5e3cdb593290ed37c783675f88 2
@@ -12503 +12503 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverJacobiDavidson.html 6d7504ce3f06932e4256780765910a013721f81edbcbc5fefff96d06887e84b3 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverJacobiDavidson.html 873e78dde4e5a8aea876aacebff4ca5a2dcd9ead178aec27da6da18ae94e8341 2
@@ -12506 +12506 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverKrylovSchur.html ee04ef3f3bf5207dd502ba51d1191959201560d3e932426e08e1cfb360d9789a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverKrylovSchur.html 3bcb2827cdde6ebd71fb373c732f561732a0144693ab297e99df0d8fb879141a 2
@@ -12509 +12509 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLAPACK.html a17c82f30438b2d63f44b02ab574e6a1029818eeb6f62c15a5375cf07fdd9813 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLAPACK.html 3e16700aa130cd0ab2e7165a0aa6426999911138157c9a7c124f1752fa8425ad 2
@@ -12512 +12512 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLanczos.html 006bd4f0a1368e4206cabcb9473ef2c8cc2cadce6126157e7fb655bc6ff5af93 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLanczos.html 38c42ad38f791ca49e8be0f5b8d5a3e36b7f27cce81ccdb7bea414ea62256704 2
@@ -12515 +12515 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverPower.html 2415c5cae9d8970112d8647df0ed2a2639cbd1093fc740337058c0d8a7f9ee02 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverPower.html c045881dbb9f6628138c7e67e4a1c0ecd1dbcf6f108a6074659da5a0344e6151 2
@@ -12535 +12535 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html 1df54ccde21fcdc5c6dbc222e061d136f4ec1f52e940cb47a5db2f1764969af7 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html d9457993f21a9dae839d9d9a4b587fe7a17caa426988420dd7ee8e0db684ae54 2
@@ -12539 +12539 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html 79e5a6b94170e1c1128d53928e17cb9b009bf55c31f1b33ba8459ba6de485ae7 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html c5874b19a82d1780f6da3b503218afbbd99393b995471807b38db52ae1fdb617 2
@@ -12541 +12541 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html e6cce09fbd68205fef9ddc882da1b134ee394684c2c5d5bae31d34fe431dc725 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html 9fcdc4a0f42b04e9d29937bd0ba5dcfae37188bda2b0b1a677f4749475dfaf12 2
@@ -12543 +12543 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 9594aa0e73b1c4009641def5c2d3dcdd4580e29e0ce5a4301bd9dbc847262047 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 63c5e97ac9a1af217c51fcb6fa2e99fa0abf6aa66db2ff8a6185c9e7c5903b94 2
@@ -12545 +12545 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL_1_1AdditionalData.html 2d753b11cb85d8a5b6a6456b4175e9ef2a9b0b2c49690ebabebbca394094cd9a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL_1_1AdditionalData.html b5b14367c2977bf907cad131b43d5b7d2187d747f829c1ea883e53439cec2588 2
@@ -12556 +12556 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html 65d83c91369cc32a273716eda67225d40f24c3aeac1bc2ece912d0b15b6004f3 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html d5e8867079346668c5542d020c7fd9b205621a8effa42fd5c0874365c34b765c 2
@@ -12559 +12559 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 5e406aae698b0260707ad1d7146b34f053d75b8ae1b21b1788c55fff0fa4ff96 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 67fdb523ca0a810b62d84109ac72a98b447f28f6ab9a78211670a90872adf81a 2
@@ -12577 +12577 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html 9472ccee1b2c4d5f4460f872aac8ba907f4bb6f47a6de96b8d278cae395c03f2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html d22d39db31aed83bc50cdfc4eb5844c8d1c4fd50a95c246cf1c5fee8b87fd587 2
@@ -12598 +12598 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html 0e8d5d947687ed25494f0e6ad8f56732b73686d0a1c8c588f3ecf4c8f4c598a7 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html 1d0d5854e9c603307235362e539d4925ae1a0b62f45c89ea4a663c8a331c04af 2
@@ -12619 +12619 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html 1d2fd8af6c974c4ab1823c4dc8234c1642d00aa33c1846878256a339a4097351 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html bce1fb0205f3e2e5b80a6ba30329ef2c62a588e84a79e749c8d58ef116650ff2 2
@@ -12631 +12631 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html eb8099a9f580336e6ddb62bdfe10fcf2e639fcb182eca4ff000ef4d37ca4c1f1 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html e5fdaee4fc338172d27e75bc9ff688a82849678412ddf24918c1eb305a4ee070 2
@@ -12635 +12635 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html 0c16abba03f1478e0fb0315a6e6fb6890e587862a8a4f70b71b51c25092f0f6f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html f72137b741b69bf53e88c7198c1c70725c3c98a6542631f948a375d1af8d95c8 2
@@ -12638 +12638 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html 466a83606b57d43a9d24bba8d396f49ac398d341c36ff6af0a99ca8f147d4406 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html ccea15bc3de90773ced306becde2e52d916b1766e01980a58db7802deafa2ce8 2
@@ -12643 +12643 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html 300c4bff00c2b910b2b3aeb6b6520d5664b65ba589a2cb7c79885ee9f9d1899d 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html c6d758f3b86204be0b807671e88efa3bf363b11daa127f968ec6e0c9e7f17322 2
@@ -12646 +12646 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html f4f1588d2510a3fcc90f896f04e186b28e3d607bb6a986912fc839e5da04f328 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html 68f245ae3e8cad42eca81a2b520572696c49676b4044cb2009595bcdc8723097 2
@@ -12648 +12648 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html 0cc05ba1c7ed3b2cd060677eceaae25cd6a6ba5235b2f85cc30069a4d386c241 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html 9d9dcdbb722ce9cf0cb71e6f505c16e66d9fc9dd147d218025b6fbe7f51b000d 2
@@ -12666 +12666 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html 4bf4890abc78390d853d9c42ab5d92ded98c00b1740126192a33577cc7513c0b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html e9e0575f15715646c896434a1ebe80983b5076d91f5d7b6609929d0ccb2083a6 2
@@ -12674 +12674 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html 12002958639051f8135d4dc05b380c017d8123beff7620b75005783f1c993941 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html 3c7eb05734e1e669376098816f74cd063e3795222911fc6e1aba4bacbb7b99d3 2
@@ -12682 +12682 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 78315a0bbc55f1ca1a3079e194861b4938bbfd4f43de98f6887d9185195203dc 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 303a8aa55b7f32678c0c365730ff0012a45b988d27b1684f0c9a5f3960159de7 2
@@ -12686 +12686 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html bf77a94508769e6efb0dd0444c41f5567d36968d274a6b3969bac62b8b102ad1 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html 04133098fe8ed44c7594758f374c23a9d8c3f4c24b9fc444a7457c5293773350 2
@@ -12692 +12692 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classStridedArrayView.html 6b52771fd93520bb66eb133cfccdd904c499c14a91c5cf757616f8bc923574cc 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classStridedArrayView.html 09c294a9dbfe8cc4f46ad78b6d572f3075638e628bbc2dd2c00f7c5b33c63a83 2
@@ -12696 +12696 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html 9a06f143eff3bcc330ad43e2c558f3961ff08d45f4510005adb5022f2162ebbf 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html 469dcdaae2035432f354f3cdb668d1d8006394208b4b811a69c6b1a22404ee2a 2
@@ -12700 +12700 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html aaf98289b0a3ce482fbfac72274d43bc5b333fc827b5cf00203066bd2a3a29d9 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html 86d71293fe5124b59be269b515e849ef3e1d551290c4a9f599df5745f8f65401 2
@@ -12703 +12703 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classTableHandler.html 21d9432ffc73e3b18da1ebdb4e713d3c2138ef5cd916c4ceb63cd178349e053a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTableHandler.html 460b03da7013d4b870916e142b43017dc399fec1aea5c561adbd0b23cc077f09 2
@@ -12732 +12732 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classTensor.html 24d43a5195761eb569e4e868a5176f68d0c98cbcf0ede309d20b37c97a690e2c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTensor.html 6a1ab6aec007136cebc851cd26861a7c260f2a5cbc8e1a9f610836c5a71c85cc 2
@@ -12771 +12771 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html 746c897441dff7ae9da24c565b57e44b44a9efc5e60f4384d2bcea00a3bb73b2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html 02d7ab38adbc1515bc32046561ce532fd02ff6245aa98caaa3543718e52d7ad2 2
@@ -12774 +12774 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html 8c5f092fec1504b196dc1276554d4f2233a8f4eb85e4f6b544e430724950c5d8 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html 6202048166a3aac9d64393066d4f0707334eed0e050564e93512f31af6e81f03 2
@@ -12843 +12843 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html 7203a178844a8509fe0d1533d850b0a92a024909662d68c1a6cbecacf9f3b98c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html 7dc74529914b5ac0df07f53ab86ad09f2ea7921219917be2428032873b074efb 2
@@ -12848 +12848 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html 1e1e52f5149302b0b038229adea71d448243df084358314b5f6afba3037ef29d 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html 0b86a3a550f593cbbbd4f8b020cc6c314877d3bba58cd2a34ac303ea2c69fb01 2
@@ -12854 +12854 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html 731874741a0c128af666cebb7403ae9a4cfafc85fe63a9f88da57d278f18135f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html cc04c19b643dd3d64983fd64ebfd60517050abbf61d05c0ca623f10701f56e0c 2
@@ -12859 +12859 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html e4428d818921abb24f60b3ced77ab0d6cf24ec035e69d44073384de86b11320a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html 773bc8220b353fd51f66d34ae73270f65ef39809907736cef4d5ee0c4ddf4f3f 2
@@ -12862 +12862 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html dae877f793a3428f1964ff83ee64e7c7498bb6f2ced406590a55d8cd206ad34f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html 559fb54a0d362a6378994191e6c0cc996a015bd09cac68dfa49a15f842919081 2
@@ -12874 +12874 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html 4419f2d0df322d8595df9cc13bafd8dd31adecce412b7e42a67ae330f4320b43 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html 190e4b7922273a469f38f50897d64b99c18f12b16ce711b020942a33027cb266 2
@@ -12879 +12879 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html b3756ef93293fa6213a837b52f12a3fdcc24ac1d91b884106b01e80dfd019a2f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html 43bce5fcefc716c28fc64800f2faeba87ffd797b564d32432761796ebb9cadcc 2
@@ -12885 +12885 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html cbbab28c45e4033586acb1574f3c29074326a85ae13707fbe3d61827efbadd78 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html 59cafaceed3fb2f741489b0e28d0f855e44749640c20a84851d0dc40a0844cd1 2
@@ -12888 +12888 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html 4bdc3fdda11da63425980fe1f4a02babcf436244875367e3f6c15e7a5568582e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html 76e7cefa17260829163cec58ecb69f4680b995b2195e4ca855c77b3b11706d03 2
@@ -12891 +12891 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html d5da4d45907b5045c5e829a1cdb0fca215f505cc630da3fea7c3b2d6f6498cec 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html d682609cbddb24408edb25fda8ebabd2a02c514843594c2b5cf5dfbcccaea57b 2
@@ -12960 +12960 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html de70d0bd05ee96f60c0f9fae013e745b940bfc12bc8e3847b1e3f46c0030fa51 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html 74bc9f67230afb62e92d6e7f5856770234d5cedc012b870534eb94a3f64aace2 2
@@ -12979 +12979 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html 184a2d94705dc8cc76dc514234fe17169a0c5283c7a1b6caa9b81feb60430824 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html 4be7ce5015d8d9af99d861f35f49a3cf50cc91935e1669a292ab046314dec892 2
@@ -13028 +13028 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html 69ca0d023a4fc6872faacca217c482582c3f93d741df2c9fc60e368c982e0ba9 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html 62ea80a0668085494a33f3eb8777d45ac5da694543a2428416af54066ff2219c 2
@@ -13031 +13031 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html 3c3745c5e6097a77467be85b061d002f14dd1f056125b9a6566e5e41b0cb2ea6 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html 3eb2de76b0138ad2ec69b6ca208b3a6fc0c6f84d535efa45cb60e54711c692a8 2
@@ -13033 +13033 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1RemotePointEvaluation.html cf99d3ab5ab042d7ffa137f6587471ccdc04039cba89d32e2d464275f1f2818e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1RemotePointEvaluation.html 9cb647afd79d16343dc5a2fc486178db05c94a660be3d67b063dd3e435a9143d 2
@@ -13045 +13045 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html f8fe0def965375d073f22b353c58332577bbc843eb744b06ebf917209ed550c3 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html 4166dedfacc00978dfc5a077c242e41e1ed41acde2a6770f963cbc2732abffef 2
@@ -13047 +13047 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classVectorFunctionFromScalarFunctionObject.html 04ec642b4193d0752dc30ccf2a5b24d1ff8b4bf97747589954a08dcba41bcb35 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classVectorFunctionFromScalarFunctionObject.html 818e86721fe88e7fcd364fc3241ce6d31ec4c63ba63c2e0282cf0dc9e748e60e 2
@@ -13109 +13109 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html b78f81ef6afad241394c92430906fd43d9102691d2aa6495a998bb02cb2c9856 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html 8073c632ef959536b2749761ea8c33d17978771b834675ae80d188dbbeab0cef 2
@@ -13236 +13236 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html fc142d18281014904643a4ea73b8ee176d791a1406559ac1379a81423fb073aa 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html 3d2280e9e4342879acb3758af1ec61b7f6a9ea6fe3590c0e97843d2dc96e24a3 2
@@ -13316 +13316 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html fd55305c3ebc32562276d3b117fef6ba4c9b983851b08446c55c15e36e5216bc 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html 8e294ffba2ccbedbe73afd6eb86d3b8daabec4e40333283ab71353c61373df59 2
@@ -13319 +13319 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html 9eb2eb01fc3e08c89235d3e03d9c7756e3925028149347c56c37c09e85c466b6 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html 96229fff72327667d75ee61413aff8cef1507194274188906a672c04d05c872f 2
@@ -13332 +13332 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html 32ed70826a89fe233bd45a626e5999bcd24fa6d62cfd3bb56d52e4b2332324e9 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html 8188f9b51246b961bb299eb80d29571b609d6e946c8d2319420b0a4098a59635 2
@@ -13334 +13334 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html 3184d08e49fe2e299d3620ead87b9682be8e4975d3b6530be3c75fa43ff6200c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html 072a9e71fee05716dc68766e1668a7cdfe73953898d59ec043c3a9a05faf4632 2
@@ -13340 +13340 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html a3bf34102120b629fe817e27216c1d7d88aa0eade96d36c4bc7bc34f5c0a2eb2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html 66d829c233b25f2d3c4c1f589be7b8049b4d09ad278cf0acc51c0b7122044072 2
@@ -13345 +13345 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html c3c50e542f2da1edd46bb4b6cb2db24b343c5c25c5d85c9a903b89b0068d06c4 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html aa20f2401c68111726e530f396719a957bccdfdf742db2100403bc25fa316349 2
@@ -13539 +13539 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html 70d8e2487fdfafa992f6b40d39e53ca0d6ad705475a2906ce57aebeabe9d91a5 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html c3e5440a93707e4ca8947dd516dfc211cb93e7919971e0a4c8f73e81850ffa78 2
@@ -13544 +13544 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html f27f73dcd0eb1c4ddc290a4a529d3109b0c7905fd6ca55c9b4affe520da6f22d 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html 6eb4c3da6c1318323a6bee28ef834bcba47e15160ecf85953c96c12c180238bc 2
@@ -14107 +14107 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/fe__coupling__values_8h.html 82e5ac57bd7667f4bb79159e5f16a125584d3161448cb7aad2aa97fa5aa879b9 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/fe__coupling__values_8h.html a5fb472560f5bcee013cfebfa56819426f03ad1622256fa6033134e2a4dfdb37 2
@@ -14228 +14228 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/fe__remote__evaluation_8h_source.html f326cb67bf116bed60be2adba8c279aaaaecb2b82c87044a7a8179ba6b4fc817 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/fe__remote__evaluation_8h_source.html 9cb4dec1f3cf6257fb28d079aef974eecbd08cb63c9560331bc3025ffb867038 2
@@ -14705 +14705 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html 6506c14e1137e71afad256f30c7a1b3d3dcd7a968b62bd00417863d135fd3084 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html 9fbb882bf6659517afc6ad4ade8de55aa0479829a257d31a6629cc84ceb0eac5 2
@@ -14708,2 +14708,2 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html 79f2192f2ff1f964e861c6a0acb0717302415825ef88298b3692a10c09c097d7 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/group__Exceptions.html b426f34609b08a313aad86f409521e3dcb6b177043808ab866047e8c0e3274a9 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html 3a48bcc96cedf15795cb5f144182ba3c3534572a646a4432d0b41bf686266088 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/group__Exceptions.html 56c3f074d9731a34f88e1f25146b3da661b3357a5a9eb771a73eb7097cd5b917 2
@@ -14721 +14721 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html 2b406a5c78ffabc35c9e3053dedeacbdf3bd28563b86c5442e5449402ca38742 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html 3e7cbfb1fd075e6bebe6daeb0b70b1f56618420cc99c281a00ae0268e6adac69 2
@@ -14751 +14751 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html 757c4fd5031ebfa61226a8c00bb2c3fc90b7bbd3a7fe57ce44f76818e4bb2838 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html f654a19a856163a520659f359d0ffc5f659f78b18cb37576667a57778ea22bf1 2
@@ -14756 +14756 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html fd5e3766dffa1e268ed547488f7672d310503f92aefdb6ec7e560bb65e3249d1 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html bb455b84b1d24c72b3b2b346e5f2e3d5bf17722e50db69e21129814a12100c45 2
@@ -14758 +14758 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html 0b67bac979ffee507e4ca3e788e99c7a3027180c851d908ffa5f58ad5b208907 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html fb954909b7e1859f7b996d020ef8afea09597e14d427c68c41f8fa2d7f5e538f 2
@@ -14782 +14782 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/group__hpcollection.html 0a7706a75d803778fe97c4c3685ad000c595bb7aff9f6d68f00944c1310d3590 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/group__hpcollection.html e40f35ad7ec55330f4e478ee7250a09de11f92607eb92dccf203d2f0a447893f 2
@@ -14788 +14788 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/group__mapping.html 3dd642a1c85cb4c8a9419a5e59df4e37e57784ac1f005b1fe5ff948f92102f14 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/group__mapping.html 4a1a23119653ccf21b5e5bad5cd193829569107a531bfd38bc78ef286aeaa92b 2
@@ -14799 +14799 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/group__reordering.html 2d51997ff9a23b7fd0c5bd3acad43d2ea04a080eab48b68fac51cc4a24855319 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/group__reordering.html 63960f317c6c70ac6a528b1d7ff43b40c814c49ae06be10f2850c05f8be73671 2
@@ -14804 +14804 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/group__threads.html bb5853f1edd1eb7fbb7f54e787dd49db6851b18da7df16a3334343f9fa103ed5 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/group__threads.html 40b94170bea4fb451658c93f7a0a2d0b8f1ccb8abf45156939954949a7771568 2
@@ -14808 +14808 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/group__vector__valued.html 6a53ad63982309f9652302548e47a52b9fc1bd8bf83a622ea0232d831c5f19ef 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/group__vector__valued.html e154c2ebde6cf7c36782e6417713bd0fad2eb557f7c95c60dc75b9096d55dd6d 2
@@ -14901 +14901 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/index.html c7942b5ae28d6bc4554191155c0a011f3eb150bbad4647659f707e233d0e82d6 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/index.html 63da94bfc7de886566b8ae761da2dca4c00fa516ab8513017a5bce6ca34d15bc 2
@@ -14904 +14904 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/index__set_8h.html 6ff40533c4c5f11d3b688c265a15e40777f4e19d8f067c4a12e3330d5511e78e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/index__set_8h.html 3d31bde9121ae94f3b917047dede84baeb0b05b255f844df93a375583770d128 2
@@ -15245,2 +15245,2 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html 5e108ca4c9eb56420f33dda7481588d568eb55c096461fd6684d09967a550306 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html fdb9dadce3dd1d93ecfd42a8ae15eead26b4e81258d13f4b624dbc5ca7c26e82 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html 979da966c1ff352f13092ce2a0419fa2342460eecb02c826c3a07bddd1e9b3fc 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html ddb495a9adbba149d9b4a9f635e9ca6047bf001c0d6331407c3e96cd5caf4331 2
@@ -15261,2 +15261,2 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataComponentInterpretation.html dc8377a06e4a395e9c76c64fad8da9d9523b3bb3cf36d958e065d34d663d44d4 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataOutBase.html 89180d2af5439e999436a23b3a37fb470fd5b0a1cf716b23d8f64862ee3d5d9e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataComponentInterpretation.html 96e4297e7f6bd6bfae3924ba90aa59d4e95b633fd7c9fc43a2f466b6dec58e2a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataOutBase.html e077b6e46c2941b8ea1f0fbf9eac9b178b46a445be995ef818902eba9035c88b 2
@@ -15266 +15266 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDerivativeApproximation.html ffbc33c1b7f3287201e9ac24a28f316f798955fe5ba330f428f9c0a11e067983 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDerivativeApproximation.html c867fdb8aba955852f441552eccd9c0a2b8448fb7a6da98a18f4f224ef2cdea1 2
@@ -15272 +15272 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html 3d8b4f5f57318eb030913be1c4633c91a7b6caeb03324cb40a4f069cec02a035 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html 866b53997e45fb63f354a8cad942f350d00ff3721ce44a8f313e5202b912b215 2
@@ -15277 +15277 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html c4acee15ed5c898e13224df956ce7f55388b266a7e3abf9ed26551bc0d8e1c06 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html c51059a1390cf156b6c70f0e07765c8dbce745f3e0c05e9d0c6ff7f4fb281380 2
@@ -15282 +15282 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html 03ddc01369405df4fdb33bbe52010d978232286c87174bcbb9b822df22f90813 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html 11796d22367feba1f9c8b55e818a9e1c2cb45854293d412732d2183d175cc7b6 2
@@ -15290,2 +15290,2 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html 00a344b6030bc34528118cae2263982a6bc29a81324d3c461263d8e3805db0da 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html 9acf873a080bb0ac8c5bb1dc048fd915a4c0f505c50d45f5d39021d7280e5e1f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html 85834f5067c9460e51d2402cf9787bd185941ac1435ea2d2edc3a6a8e8fbc78e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html 58de4dbeb4810302449264eb0c765e73678dddf8b46e27c5c6b4b0bc5f1457b4 2
@@ -15299,2 +15299,2 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFiniteElementDomination.html 826c532cc2ae8e66d82e327e5cf088702ab929a85648537aa53bf4c0fdb1788d 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html c9a0cd981c26029d934d43cf0d750e786b530a1d23afc916b9383921dacaeeef 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFiniteElementDomination.html bfde139e2770278f05cc53bf17c8081ee668aa1caf47f97527af03ccd116c124 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html 8c4f93f934e9825faabdb88046c448b6524618b27629c5d2ff5102b1dc57a704 2
@@ -15309 +15309 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html e2a6e878ad1937f3ef2110533c4353ef2bdc134966138e7536f67438ea9bf0b4 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html 07ffc4d98d94e0469078a4c55f5623d7035aee600efc96ed5726d9d3583eef05 2
@@ -15312,2 +15312,2 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html b0959ba72714e1ded6ace51f80e05fc20c548fe8b73f4d12110b56c97f79157f 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html 1121d6959b776ce576176ae16827151c84ee0c1ae81f5bc00e6033899229fe52 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html 2496f573bfd2fe6b29c9872b14024bd25155e86367540773a4cb2544daa3950a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html 206ca598c86309ea4dc6fd62de17c1b725e4833b5e7d0660eae20a13cb7af8d7 2
@@ -15324 +15324 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLineMinimization.html 1ea5cb882736d1b7a17d51156df7128b5322735f1e7b5dc3f0258185ec8a42e0 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLineMinimization.html 5af24429308cde2a07603487621f8176a6508f18f76a56060f5cc3ce16e9274f 2
@@ -15340,8 +15340,8 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html 3857281b79970af799ff9acde70fc72647494b47f94d6084306de3da5fafbab2 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html fca9502cae4b5821ef24a80c6f60ef2658f7adac1d98c1ae73f52d7cbbffeeab 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html bbf0cd8ca8a97ea30f96cf07cddba11c196a87c60d52b4888b35e90f684301d1 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html 7657f65ea5252b75dabd2541894426d55e1b24995ee453409de60780796857ba 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html 298f066531554fe2f15d394bf895d529f896f0f61e1d245f2d3fb008247ad950 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html 30302e3324a7906ccd3448a339abe9588bf6b715886153935e67e6fc3518a571 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html 41fc7df89e02b5905b152b83713d150bddf97a869dd89b9a0ec3f651a63278be 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html 694cb1c3eec794c4c7c8c763da4eb96c5327bfaffa2244739131e9c7acfd10fc 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html 5ad804780f9cea112998067c6b29ae31273cff0b34474cd47f975e0d634f7465 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html bff096a037583ea6fc8641527a5c041126b1ecbbd93bc882601b3ea158284e3e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html 46123144061f647519e1a34c692764e4f8f501c9005fe67599205abdad6fe92d 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html 16f3d5c6f106f51756a8859a1d98d1a164cfc1bb919989368558c261bf61cb6b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html 975a86639fbbc5afa7f58f6be3880fb51c348003061ad069a54affdd2973ea7a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html fb973ef7dcd37569e9cdb406bbb301488d84f9d65e53c56e00fe28b61106dd57 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html da7c022df927804f6c695148741e06d7e62948d62e7aab7cd1e4d1deb9bcd6b9 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html 1570175ba3066614469dbd5171cb5e7db661c898465506b9fb20e96733e6cf01 2
@@ -15368 +15368 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html 734f9c7f0b31632d4ee8824413d978f2e59865cac999f51bc6c9880fa178d662 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html 3c78d35e3141fe7476c7f424cac72b3c0c29e00e10e9ebbd889ea4323e4f79fd 2
@@ -15372,2 +15372,2 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 3ed707e1d2ef38449a90102a1139b417dd3f13e3e134b98679136998132a003f 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceOpenCASCADE.html a95d4dd68ea658fe5361f545c3e0b8c3ad9061aa30f7ff543c7b2c318929e717 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 22cb98c9ee41904c9dd6a44feab0efff919bacb315eec9ed0f86a276b2f10e30 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceOpenCASCADE.html 29bdd934b296d81c76ee37f06729f7b839a9d2f68cb496f0bf5b23358fbeb60d 2
@@ -15381 +15381 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html cfdaa9bc3a28d2d605e9417f3e5e4d355ba5454a3668ad5d851305eab6cad4f3 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html d9bd35624bef53c023d9de9c05885dd1587b2e3e2c64e65caa085ffd28d1f802 2
@@ -15391,5 +15391,5 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html 0dc5982cc9b29a4f9a786724f695d6bbf83737be751df7231628842e171693fa 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html bcf7002a2310429564acee4493a35ddd7bb8b673227487d2062291f1047cdae1 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html 49151f699207af7fdbffb99e01f2dad82a7f7cebc80a4c4a3a2f48fa4ae10d65 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html 40dbdf662b114654cea2e79be6f9fae79285fd506907c53f5e069b9ad018e114 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html cb0fa894a3125ec3961a845a8a66a0bfb3e3ed9ed7490c272a8e794e9efe241c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html 871649bf6966bf88f26ff968b88cc31e0cce1c6e73c999263fabcb996a21b82f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html 25af7140a9df897170ff635d200705e267060dd5a491e12808ed23ed79e9da9a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html fd44c9530912cab87565c6f30a521ab7a2e39794e462f7d1b148c4e65814c8b1 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html 4541d763e98a89e489defb36343dd81d29395d31a061e78d5e9d6f304c9d11b0 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html 02fcc82831db5b4b3e72f4d2d752f2cb57c5e21a557af6119369693dddb00751 2
@@ -15397 +15397 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html bc159c6ef18783c1e2fd19302385e5bede074fbd05c735ea64e6016538355bc1 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html 89b2a7d6689c72d7c6474c735738eafd51caf975d95310e88c9a4f29441a080c 2
@@ -15407,2 +15407,2 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html e14e7634cf87b57f31959c2c34e2c6ef2d3b1145e880a681433c237f9fd853ec 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html c5bf99724abdbf9c5ea43717a635103713d098396ee9e0682208926d6c354f33 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html e08270ac882e2aa69b7fbb51f93beb89269143ae46ba8f841c0882a147ca5f55 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html 643ae7c1747f30ffe655baafda06b2a51dcb58b7a8559770cf72002b6805a6ce 2
@@ -15412,2 +15412,2 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html 16c97a03b915a2b5707522d62b1838a22edbaefa656186c17cf795586f931aeb 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html 22ab1ee27f754629af51dc8bd7474271159d45399a63bf87f8855086b81a850e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html 99fb1ae7403a54470cffb0daa95af21ff773a3f238d432b13e1389ef104612a9 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html a2ee3672de3ae92415b01e7a0c180d4b6cbdf4ca8b13ecaa4df95296498a3049 2
@@ -15415 +15415 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html 016559f889e1519f4b666fb9326dd3643510e9eee260e5cc7501923ff1322546 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html 5c0f12dccbbfdbc47629278b20b13a268587265dcb786bb9afe0c5edf255a0a0 2
@@ -15420 +15420 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html 4864c3c51ddabf6fdbd97e3cf769a2558b9f0cc5c6e27aed12d50e8bdab134de 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html da33b2b8f5130d6d0edd38653e0d0cbe962065696343f1dcff973469cf617d6d 2
@@ -15441 +15441 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities.html 54f9b84c4e7eefc355e29b4c8fe5c286041b6cfeb1780c7b730eab08846b2246 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities.html 03a7da85b9a1444509b357a9dc5f1e36264c056b47e69e7677ae798264be22da 2
@@ -15443,3 +15443,3 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 35704d3b6429cb7febb9cc8cfe04bef0446cd9556740ce2bef9b44993cc8bfbb 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI.html a17f7909fd7fe7a63b8a28e021c3cf46ce94a74b580e3157d0eb32954979ca59 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html 3881438c0493d3fddc7814d9eb5feb05b44abb59cdd4fb185e18d81bf7d6edfb 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 287e263a993672c5eaa2eb27dd872d049cabba7a79fefccd58e73cc6ed6b4fb9 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI.html 5072eb7855e09bc9c6740bb13e2d4fb8c23992838ff43bea144041053002a0aa 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html c9ef8521f54f2f6628ec135c87be05b8f25cc8bfe676cb84fd52d03ee12df42e 2
@@ -15458 +15458 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html 8baa6c54ee4ac18d197c6a687d9e8e00c7309db7c418506939f4824734ec703a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html c3f08e793935900725ced2c2d439c3c65ec9520c800c7e65452d2654f7dbc902 2
@@ -15478,2 +15478,2 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html c7ddfebd892ab4dafcce208b0d52ccb507111d85e64ca2ed2b1f62a902bb1a20 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html 536d68b00fa29c1fbd58a7b9a86b78dfdb6a162345e4042de6fdadd6d88fb234 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html 902e8722f8557948ffa29107b803ba4f1625bac45a736b64959dcecce4bd6ac8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html d7305ab4b373c05874eb2a2dd6f5cd9632458674c2dedb7fc1f0a365923eebec 2 @@ -15616 +15616 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html 882a2b8cadb5ea2d030e5c481d44cdbc49bbd382de17580c8fff1325d312e617 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html ab3ace4e6d3e8bf1e65b09ebc236f69728a789c5ba30faa99a0303489bc71922 2 @@ -16711,4 +16711,4 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html c175480d06b83a3a4c53601b18464737d84e7ce1cf64bb7cc860c4841cb56d0d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html 1c5078050e1526e883fd994c43141041b8d76650b9ccf2e9ff2efbb42aeb361d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html 2ad410689e062f0c1d8190df69d3bb5942baf0097000379d5c943cade46a3f36 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html d405e4a0d42ccad0c1cf3473314c9ec501492270bec6a7fceadc6908c7c15f4a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html fd0bd81f0d31e66ba5c5e83ac64d8eef883706c46ad306fb6cd41c95f7eea4b7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html d5b664caa135436f21901c25ae25c77f3791991151c774e89c3897bfa551693e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html c70bc56ae960863badfe7e26ae077ad1bd915f9b2841e3b2a5f98210f4a67834 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html e1356c15040cfb93938701a6cd113ca6cd6b58763820643b0b58d5e688fa0e97 2 @@ -16716,3 +16716,3 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html 38a88654511f8f66c398e5e51ed61047039e2a4397de13ec42bfa79331eca078 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html 56367ac968bdcef04ab3e07784d73f09e607ce752d31cff1599b82fb1fdb4335 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html e20590eda9cf5ad70c28f238a794cebe69273dbaa07038f3294ddcac6806b5cd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html c379dee1734b8412b1889460f8764d33c2e13d390ab65cbf9e9dddacf9002b84 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html d75a9d3e5110c838024a8111f981a97a00fa365e8ff7bb7a51809ab3564fc56c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html 388c2a1f8a6c6a9867c6fdbbbe5bb9138b1a0498687bfe73471f9b75f79bea63 2 @@ -16720,40 +16720,40 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html a22dc8d7faf4637b30ab200cdf8a41e58bb90b7b030308910f505145421ae61a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html d20b625e346f95d73581effb0ebeddec5f70242ebdc50b9b8543fedee6959fca 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html 1a71c276fd74154a089a8a2081dc4c3279d7539c7170460f39f7ce435c59d8b0 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html 0d1061d202f549333c3fa9875c4047e2352e57614afa175f820c0abe5ee929b3 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_21.html d61c189878ed3e931eda2f25286922da665f52dae09da61bfe7e4852556dfe27 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html b3622bb830c2082bdb0b01b18a38f4963963f4d1743fde0e98151309c64aea7d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html 31ebd093acced81e9e98848e56df632003c871185c828121764cfd13455004cb 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html d00f342cf56e74b5c9d7b7685f90704bd8d9372ad4899a5d0baf1b6a1e786c5a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html 
3a5675b977553940962f9fa303a4cdf10495400ce6b0c76de3df97620d4bd653 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_26.html e499d09b8cebcb0428d54c22f8b0f53feac193b4e8c5f66d2202fa812b4eccde 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_27.html 1cd9d499bebac748cd9f502117d84ed885e0e403b30d4d7a2673deaa588e623f 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_28.html fb41c618e0c61a754d7f1234ce9e67323ddc315362a5880a51b82ced7eba6552 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_29.html 3121430d8f52633951287686776aef267049143dd7d8cbf7c51816eb49494841 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_3.html 0e2b5ac22e28b1709d36845483b57ba4f3fd8e2879d3e7d10d7ec49f30ccc8e0 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html 79b1adecafcc23ba2a7ee9441b1ff830076998bebf9051fec049172ed8661dd3 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html 9dd149ada5c8f073c425156972fea7a2de52bbc7cde7f63484868a38a340a36e 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_32.html 35f918354eb4bc0d0089a9b11290cbde599068a59d4dbf7ae8f876133d470c46 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_33.html 8a3562ba3a08460619c14aae359218c8bfdc6b41aac20c1deb3670c2ad226f38 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_34.html 8348a26e0ef15ac2f2ec51b2a24116b1f91f57f18fa8a2f610dbaaecac5c8464 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_35.html 6833a296fd55cd9bfbc60dcb0c4ac9c6ff2e8a334393f82ff82be970491d3ed6 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_36.html d11f7a3862fb7516f2d9a917cc229f8a9e6e5cdd9168c3f56932308e2f8b8102 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_37.html b25a3498bbe48075fea5dfc59c74df9cd652dd73f56aed609e47b01319465127 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_38.html 37776d92ef65494648329115b6c2efb62f957f5b7e68d68121fa0edc08e2219b 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_39.html 3a48bd9f263f9b60da806f836db247307d58e7787fb17afeff2bd636ba356422 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_4.html 55a23e74c1638815a33f89e17f10b36fd6e425169bfdcb4ae990bfa858991e6d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_40.html d112a91ba2dbc6f6a4dd21dfabb35694e0a9cd7ec4cf28c66559410eb780af39 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_41.html 8497b56733938f32566983eb7d78d8f564541168b65721aff5b171d0cea07b0a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_42.html eeef84361a02efecc3d3db89404f170098125adfac0ca64d7eba794fa03bf916 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_43.html 1014660e1cfafa6b73eb47628eb50b92252cf3833e76d4759346f7f835830f4b 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_44.html 298d6a39a19d535a35ac83a6a7c8114677c067a5752f9a5d15403db29c589ea3 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_45.html 5c1aa2ed99f2fdae249ce0f61db0c4a6de04c6762d6664a8f5e1639417303dc8 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_46.html 066a23a7a89a8d7bdf4482e7bf599a0ae1c86e98e60cd7325e29124aefdac5ae 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_47.html e71882ae80e14bbc168285165631695fefe626e51886058852dfc1a7335acd05 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_48.html ce54f04dfa235e7f6603ce58fe7d04de627aadb31a9348aa0a7f960636a31ce8 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_49.html 07fa6b22e69a8d0e9de01d392e5f44b31c2e8ef8083a34b53be38ebc8e5bdac9 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_5.html 6cd27a3ff8eb4a103e40fc6d2b606ef1e4d23b178610a5adc0e829446e257726 2 
-/usr/share/doc/packages/dealii/doxygen/deal.II/step_50.html f26c0f463bf74c0d6d34aef78f22476fa0df27706b0ceb9cef35c1e8043438fa 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_51.html b69c29801b73b446b5129f50f2b72c5a5a0c4ff7e146a53d36e2af97bf3f937f 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_52.html dfdc7e3430a554ec55311d5480033efd0afd9820303b57a328e0e49a913d4219 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html c827418b088aed7b08e9c8a592e881e1f05614a50d3b80c73a490af6fa5e8b66 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html 244e2202da79866f89b8307bd8e3d2ee6395229d7ff613f03e89db6e995d389a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html 07341749e3e7467fd3e7433277e98176768db5184773c32c6d93d37a6a36ef4c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html 0715e8de32e1dbd5cbd2f3891c87c36886f6960f533837ff559f47292810ee45 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html d348d95dd1fbf01d4076fc652b3eee1da57482c32e0466e383e42c39d50d7c7b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_21.html b4f70e873cb989043f37255e5cda6d8a4351deb37f4cdab40d93ef4a927c3e22 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html bf027e3193fcd97f0bc633ce6e7a0404ac35b169c9ef1b632b8b68c5f4557b04 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html 474e890318fca60fa6008ee6467262bf3c9b6242a1e9e82bbf8d09787de4719d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html 0cd03afeb8fe58c53b9b3a0d6b1b6b834aeeeff09cfcc89fb45af07310be7810 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html 2881158589becf15150c38a405a14c29a76816e936206d25398aead27dd4cbb9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_26.html 7f3ca313a6d823b94b97d0b621b6e0db67d86b0895cd96086e3498c29e588c56 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_27.html 89e80de7faa2818c772cd96b0ae8429c9cf603f38e997aaac730d690707e0339 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_28.html b961bbf9f0097d8872cd24a90d2b6ff29d582f3e21f01016be728e0dba35ef2d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_29.html 41f8f3a074ba69b1c2c9562929c8e4f198c38ad4f4441cec47764e8d8f76cecd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_3.html eea469e771c1e12dce72bf9c85bb455a7f5c1325ff28465714eb82324b1a01c3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html 13e45c7b9338976bee923ce2ac55a57acfbe081b2f408ba865f543594f520676 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html d7f35a7eb23a275c0919c3138d42a03d626e1b55ed362c9dc5c72b2121c22791 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_32.html 66cb6e1379624815b27f7d5d61da8ae892d5cc885ad4d57b800714de368bf351 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_33.html 1b4fdfde39c38710fbddc1f7c5ea6ec515c18b9a537f977f46015bf16ce05ffc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_34.html 213b61f4eda6939e40e435508a6ed9a46ac65014a666b3e75a0141703e694ce9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_35.html cf642b30ae93131f4f1678026c8f8be347d729fa70403708f99b099acb469a33 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_36.html 7cea80736a3e5ee5bc6292752c54e7858b0c16c562201b096feb7ce956e5fab0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_37.html a5ef1d0e4582fe7e63ed7a394d92821ea578dc283d54361c7addceb86612f114 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_38.html dac4172e930f5dab54c885ef64041b9b9a7b350902cec084fb23bcb0174ab1e4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_39.html 
6124c55cea9df76c7691c4a8c31f95ffa74dadc2c4fa6e5ce303f87590546134 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_4.html 16320674d0f5f40c71c0fe2873171d914a4736232f6ecf2c777bfd2267d5d335 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_40.html 4f161211729a5b6174339c9d57b42e1a53f64f01ac10f0a8a275769db0c53fde 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_41.html abee807a6b06b3961e51e45b8778bd9e397f8feab04a033ea806f00abe243f8c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_42.html 2fde1dc34c031f48254707fa5d5019bfb84b927f2b56509ce80f4ed32efc88f5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_43.html 31ae75fdf385bedccb890ee2050637cb0c8d1fc532fa0eecff70dd2174973567 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_44.html dd1b505edfd9b722737d422edf4ab5ce0e14b5e457ce7c714acc338960ce78dc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_45.html 096094ad45af955bdec11fb05143fae512b6dc23b7e26f64468dc74604083522 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_46.html 8a47bf4109dab07e2d8ad9c0c86ca8275b853dce08589945cbbcfcaeff701f65 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_47.html 8a49f142713b880ede7d69c8e6f27ecc4fa257524c64794cfb453e958d0eadc4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_48.html c2d23c4c27921c8e7e006fe31f2176f44af29dc2a6b43a4f13971fba6da4ef1a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_49.html b9b606de3db626778cf3b6b97e356f7ecb20dfd7ae18ccba39bf3121e906ddb4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_5.html 525751f4aa3dec54e40a861e9a0e6bf5bb01069d4cf51b273e594cd3a66d9a23 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_50.html bef7344ed5933b1a8c0940aee930705d3cd5cdc33b70bbb0628eb7bb8f508e7b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_51.html 7b7fd00661b99f20bc92eba82f665d7f8356576bd2753b3b0c01b8da02b3076c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_52.html 40d93b503afd277a07956f322709453ff61bc3c3b04dbbf88e112fbae9dc2e70 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html 2d5cfd750fc59110941a0608f746e8e0371689fd5f576193db49e4dadabaafee 2 @@ -16761,36 +16761,36 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/step_55.html 6b8347afada53209080f7957b6874253d7f3cf01e4a5978cb33a04af6e43827b 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html d9ec8a6cf1b7953055fc9f38c1cbe872a3165de96b318ecd05b13a1e285a8a99 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html 452dedb57a41c6c18a1ae2e5d5c34b903a874a738637cdc222386a9f1f8980b9 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html 41d2c4e564ae5208df93b105410396f47b4bc9e7264736b9d32692de9b0f1c10 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_59.html 10b88292594a5d4111526acbbc54f06abf7924ba7616a67720e3f707c6f0f9cb 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_6.html fcf9f0b057d017231513874eb27c51e801db1fdf6510d985b718bc5ca7f4f29d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_60.html 5305c455e8e5f3e064512e8bed2f6574218bbbfb0e7127c75a701e2b90475630 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_61.html 02328a4df1b03cbb609357daf31f3448d35673631b000d3e8e27d621d1dc588c 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_62.html 544a68ffb21541b9daa0ca22c9ebe4271825e93f4d9fd464cb193d662f7f9318 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_63.html 89c1056146d338844aa62b6a98cab7e9924f2918f702b93cd779a7e7ba1a95b5 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_64.html 5ef18775217bd64a788173cd9df02a819e3087cd97e17f7175330fc1b6450b33 2 
-/usr/share/doc/packages/dealii/doxygen/deal.II/step_65.html 4bd68f4b0101f1dc8896b0ab3bd0785e7d560a3f134488212dade0225bb10b35 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_66.html 10bcfa3dd36ba926eaebd7518b6161e4de2f36d0e62fd22d2163c5f449149e5e 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_67.html d327a0129b9ee949abb31c05ce3ddb9d1cc26024ac8e82e66e55176640d51434 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_68.html 09ce66a2b372bd01cbf3fe31489d7eed3a71066a9b24022505e283ec1fe8549a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_69.html 7e22b7453c6e6b1468e5e420d1bbd58c0550409866c72aa3090e8acabab74e41 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_7.html f3ccaa87b3a43eef0ff5c163ae3d3129f6e8f0851cc52519b1a407cdb0324f5b 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_70.html bae818eeaf6e17a076670a0b24d270585fea7242ef2fdddb7c0495c224cff6d3 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_71.html a4b204e869abb5bfc52efb2cd77b09bc8c80646f0b95fd96d98c435cc5cfa0de 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_72.html 2ae609ed6550c5639341bcf9dd4cc6da601bf532e252f97d5e7fd3a23a27a8c7 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_74.html a082817b79183f18cd3a584d4e2979b77043c387f68f78aec6d6157121a279b8 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_75.html 962895d4c480184929b2681f6d3e42c57998a1c6cbb9d645e3916d1b40a17002 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_76.html 23ed0e3abfbdda5b7cee9ca868357e32dd56d0c5d973807a61b432d07b4e1ebb 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_77.html 49afb66ba3bdde79ba8d2d3699b745264584c2a0fe45a1c843cb3f77f4439673 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_78.html 28e7a46ba14450fbf4a009560b8ed0a8838ce15c178c13671f73642676d5ca80 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_79.html 95bb72d262bdcf53a42f562ca0cc5d0b3e3bc461c6190fa101b1d4ff862f3028 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_8.html a79d661a270b63cc3996feeadc8c3a3c5ea23cbdebb788d693c04169f90821db 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_81.html 58e0ff4013b14e3d7d8c68a75110b42a4b9cd8a8ccd4daf409cfb4b7bdec0e26 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_82.html 62149c77ec4169e883d34486c37bf12dddca2ecd33fa46de787624966d51ff15 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_83.html f9c260ef5567c0e18b09279edcb1a7707f03f23203e5d420d7cb68c6195705d2 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_85.html e43076bb6c24a5c0724ef7acd79c4d5d87198399aba5af033a0ad6ee5da35b62 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_86.html a72b267dc9e36d24861fc11da826b817726dd8710354a7a7e2d613722b0c79f1 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_87.html 22efb105466708fab51a9a0515a1642f8c25a4900be03c25fc09f3081535b16c 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_89.html d2e92967f719aa942429157318edeefe76c5c2fc76d87ac711a4e5cef7b5b17b 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_9.html 7d5d2b7079e945e38a1316c13b68b63e459b8cace0d6244fd0449273a7cf6aaf 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_90.html 601786f5e23a0ca7304bd0885fbac9fba4ca25bcf008d6b90bb535267bfe4af7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_55.html 0c58ff8a5ff3782df8f949eb6202f4ba306d5aa4a5222a2b3f75949ea4492f17 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html 100bf31aa04f36ce5dc9fe3452423cda26bb9d0c7a3bc7c878527590225aceeb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html 
dcc98a2358d1ecb898a5aeb641fdaae5ed283f982d24683e0b0efdf52b797b6e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html 689e46d61c3207e13421ab9d9bb369061cab9ff7c7a075822d05fc14afea7313 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_59.html bc1c1e3ee389d51bff5aa561b7f90e851b0414e19b385c60cf6e15e07c4fd527 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_6.html 64a4f5bb377f06a6513097db362e8430ba608569e571faf88bb6e64f0743a53f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_60.html 8440734a96131d60dcdff0a1649f34ddec6dd6288a9a6166f26254fc5cd647c1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_61.html 63adfa1d052e5c43a733a981ddf8cc1c10a6c9123eff99e3ad9820ce94f28751 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_62.html a92ec517cb9e92ac9e50628d2116e58083b21d7bbb731eb0c7c1eb6dd0a27b65 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_63.html 11668b6cf66158c786f93b282639d765020aa2485b3678ef5c542ddec2519a31 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_64.html c3e4c6a9a6b42845b0a1a7d7a805f71f8ae3196a72b0ba525ee0253a88b7f282 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_65.html 752cac63363906fce155ce88c6565d318a4532194635e6fdcf4e1f2472e0654a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_66.html a6a24ec6d15f27132d38edc9e3d900b35048421a86319a82b54ee03859a264bc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_67.html a34913193da8030c1f86a10d1edec4f2ed5f157c4d3a9d2f3e8103b35901dd0f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_68.html c2d9243161d2478e1c641dded4dd511e61f8a1ad99b4e37892d2dc6da7ef3d3a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_69.html b88beaec1f3fda96d738fe74dff5b7b272dbe48c2a9c9ac5093d79df085fd2c7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_7.html 2b3e3cea659e65f71b6fe60262074ebdb9cbe37f3776e5f6244999e3f2bebb32 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_70.html 071d74efe65b18fac5c1fc88fa107c9dfb7f18fd4cc73aa5945c194742acbff7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_71.html 0cd88a1c53c9f52b551f7c0c7c614b167cd4fa30aa37a9d4e02d7a31c04174f5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_72.html 3eddfedfed082b40861d114dfcc3b1a86792ecdb58c6edeccbea349b4f26efa7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_74.html 4908cd247e97a147203cbc6588e4981dd0f1c42f2d7927911b2abd57d94fb76c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_75.html b6360eff6eddc254758d5deefc7f857d299fdb119d233ea93ae1fefc677be75f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_76.html 2b76db28b6799adecdd18933c9d896e8e21dcd5597c85913c963d45b8a104249 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_77.html 017d8d41fc1bce89163b616a160220dee050ae9606c9eba919e2a9dad188970c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_78.html 7e0165a17c592b2b072f5d2b834082b3de805d1ef04a044ae3c34f3ebe2ba7af 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_79.html 1d4df9d3617904a0e292614008c559651f1da3d5db9b755a5f471d6964bfe4ba 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_8.html ffd234f86530cdcde827604323a8ca2dcd711ea589b58ccc5182b99e78fe3957 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_81.html abfe45fb7f0c8e347f9f3bfff28124ab98c4d466a5074fe2c784e0e3e9ece462 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_82.html 79a3ede7a5d68b621d1a76c228ebe32f606dfad658e82b4225f0288b19f92fe3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_83.html a8153f341f27e18d767c2bc4a119cd9ccc9f633be4fb2006a75b1c9967f9bb47 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/step_85.html b0026c106fd775fe4f10e0277db625b216f126d6a20afced58ad5cf001973aef 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_86.html 23493bc8b65687959ca8017ad3dcba9c19d0412f04a6610f4abb1f8777e919b2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_87.html 2e23a6e4ce6df109533f5066ca6e42b73a46446949e178c8942f2b35c1edbda5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_89.html cac2a1b7cd04fcc96157d52c17f8399c7f9968d2bb333d2bbd66e5f620063ecc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_9.html 618dfab243d1abbbd2cec26e4ee9e64bc455a40bd14e8f4526e41c59161e8d2f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_90.html 52d25c0d80e192efc32712354eb99959268861b05aea509a47fadabb7705b695 2 @@ -16843 +16843 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structColorEnriched_1_1Helper.html 2122fe47a2766cf016d64dd644d342b9fb8307c2d9418efc463a0d25545f40a6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structColorEnriched_1_1Helper.html 77b26d568231a0b293ebaa88c7dc87f4f9bdad9f3fa009e197e552155aa0dc39 2 @@ -16894 +16894 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1CommonInputs.html ac85651f95f88a5174eea3c3d9ca8a3abd753ffd9e24c3a8a54b3e6c28ed964c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1CommonInputs.html a273f608c23ea9b1e34d04eaec847bdfbf4074589130c3e5cce8aa647dab596b 2 @@ -16900 +16900 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1Vector.html f96e95722ffc690a9d25bb3a50a5f9e39613edf9abc696ee263aa19153eda2ab 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1Vector.html bbe00ddfd497f507f27fef49b199bdcecc7d1863c5194182eac04c66f6f4bb4f 2 @@ -17080 +17080 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structGeometryInfo.html 71a7937fa755a3f9c9c5af3e5ce6d00152539a1440b877589482790691da3ccf 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structGeometryInfo.html e6104763104b92cd44ea7de8b7494ad48babc0f21d5a7c1e79921a30febd4130 2 @@ -17204 +17204 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1AdditionalQGeneratorData.html 766ea5457a230af588c1a6da96ee7f5a7e9e156a54ed9c03b13078bcd378abee 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1AdditionalQGeneratorData.html b3dc037bedfedbeb9f547ed447949e7bf8d7d6bd81edcbbe3f9e2cb41a35a5e4 2 @@ -17208 +17208 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1RegionUpdateFlags.html 64bc20bfbcac0d735c3535304dff737ffb69b94d52508833f3efd9e3e2d33a3a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1RegionUpdateFlags.html 54e313c557cde2fdb435cd0bde0f0f28decd1a4bf9e5a489f3f45fe6d754b522 2 @@ -17212 +17212 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1HeightDirectionData.html d40d5caee3cf53a378d72aa70d9468898f412c5177a2044cfaceed575ca87e73 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1HeightDirectionData.html 78a9c0149d896ccf880615924156b934f5f5b046aade7482441338fbc425b77b 2 @@ -17364 +17364 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structProductType.html 2726d726149f3a510d270ee5214f26340698c2801d572bf837ee4da2d10a028b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structProductType.html b15a74005574db11dd53e9bea36da911661bb9f2ea91cf8a571003760b729e9e 2 @@ -17395 +17395 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/structSUNDIALS_1_1SundialsPreconditioner.html 3e93b0e2bf3188126e370e93fccdbb5d99b1790d9a6fb2a06fa52e16a6a5dfd1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structSUNDIALS_1_1SundialsPreconditioner.html 9931571cf04a212d955cbc71a581ace2173fe50fcd08ffc227096657d565ce77 2 @@ -17440 +17440 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structSynchronousIterators.html dc4f7e43b6342dce9153048ca2398bf83a0a91521148968bbc5a33588e1876be 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structSynchronousIterators.html 44dd68aa250f0c8b1323884a60cf2be0f57cc9b26d20bfbeacae0f13b84a0640 2 @@ -18344 +18344 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/symmetric__tensor_8h.html 09d6b9bad6965ee04f894e44d439185be98b43d2246bc2b7fd334416c2ab076c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/symmetric__tensor_8h.html d2847e2db38dcaa5bb3dd99dde7a1d860a34ef59c57a13ae22ca1d9c696129dc 2 @@ -18348 +18348 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/synchronous__iterator_8h.html e079f35e92bfb2329ba032001e74074ba0584dadea972685191cb88acbc40dab 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/synchronous__iterator_8h.html 7a533c77333db39fcaa6208d713922d2dcfb5e722bcbc8122499bd36285d8c0f 2 @@ -18380 +18380 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/tensor_8h.html 43dd12757b794d6a1f1427a759a32a1892998d291e129e9e195b66a03bc3b3fe 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/tensor_8h.html ad2afd01579ad02e41ce5e1369a8c198b9d7367c91b4ccf0a20137c389cb00f3 2 @@ -18444 +18444 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/todo.html 3d62d491c88e9144a55152ddee9f9f007723a84d6d68dd9971edd62e964b1709 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/todo.html cd59ae60be146ecb8f9a402203752c406fdb98509d64f0d5faf395663e875106 2 comparing rpmtags comparing RELEASE comparing PROVIDES comparing scripts comparing filelist comparing file checksum creating rename script RPM file checksum differs. Extracting packages /usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html 2024-12-27 18:24:52.224759880 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html 2024-12-27 18:24:52.228759908 +0000 @@ -116,7 +116,7 @@
Block (linear algebra)

It is often convenient to treat a matrix or vector as a collection of individual blocks. For example, in step-20 (and other tutorial programs), we want to consider the global linear system $Ax=b$ in the form

\begin{eqnarray*}
   \left(\begin{array}{cc}
     M & B^T \\ B & 0
   \end{array}\right)
   \left(\begin{array}{cc}
     U \\ P
   \end{array}\right)
   =
   \left(\begin{array}{cc}
     F \\ G
   \end{array}\right),
\end{eqnarray*}

where $U,P$ are the values of velocity and pressure degrees of freedom, respectively, $M$ is the mass matrix on the velocity space, $B$ corresponds to the negative divergence operator, and $B^T$ is its transpose and corresponds to the negative gradient.

Using such a decomposition into blocks, one can then define preconditioners that are based on the individual operators that are present in a system of equations (for example the Schur complement, in the case of step-20), rather than the entire matrix. In essence, blocks are used to reflect the structure of a PDE system in linear algebra, in particular allowing for modular solvers for problems with multiple solution components. On the other hand, the matrix and right hand side vector can also be treated as a unit, which is convenient for example during assembly of the linear system when one may not want to make a distinction between the individual components, or for an outer Krylov space solver that doesn't care about the block structure (e.g. if only the preconditioner needs the block structure).

Splitting matrices and vectors into blocks is supported by the BlockSparseMatrix, BlockVector, and related classes. See the overview of the various linear algebra classes in the Linear algebra classes topic. The objects present two interfaces: one that makes the object look like a matrix or vector with global indexing operations, and one that makes the object look like a collection of sub-blocks that can be individually addressed. Depending on context, one may wish to use one or the other interface.

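A minimal sketch of these two interfaces, assuming illustrative block sizes n_u and n_p and the usual using namespace dealii; context of the tutorials:

BlockVector<double> v(std::vector<types::global_dof_index>{n_u, n_p});
v.block(0)(0) = 1.0; // block-wise access: first entry of the first block
v(n_u) = 2.0;        // global indexing: the same memory as v.block(1)(0)
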
Typically, one defines the sub-structure of a matrix or vector by grouping the degrees of freedom that make up groups of physical quantities (for example all velocities) into individual blocks of the linear system. This is defined in more detail below in the glossary entry on Block (finite element).

@@ -148,7 +148,7 @@
FE_Q<dim>(1), 1);

With the exception of the number of blocks, the two objects are the same for all practical purposes, however.

Global degrees of freedom: While we have defined blocks above in terms of the vector components of a vector-valued solution function (or, equivalently, in terms of the vector-valued finite element space), every shape function of a finite element is part of one block or another. Consequently, we can partition all degrees of freedom defined on a DoFHandler into individual blocks. Since by default the DoFHandler class enumerates degrees of freedom in a more or less random way, you will first want to call the DoFRenumbering::component_wise function to make sure that all degrees of freedom that correspond to a single block are enumerated consecutively.

If you do this, you naturally partition matrices and vectors into blocks as well (see block (linear algebra)). In most cases, when you subdivide a matrix or vector into blocks, you do so by creating one block for each block defined by the finite element (i.e. in most practical cases the FESystem object). However, this need not be so: the DoFRenumbering::component_wise function allows one to group several vector components or finite element blocks into the same logical block (see, for example, the step-22 or step-31 tutorial programs, as opposed to step-20). As a consequence, using this feature, we can achieve the same result, i.e. subdividing matrices into $2\times 2$ blocks and vectors into 2 blocks, for the second way of creating a Stokes element outlined above (using an extra argument), as we would have with the first way of creating the Stokes element with two blocks right away.

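A minimal sketch of this renumber-then-partition workflow (the dof_handler object and variable names are illustrative):

DoFRenumbering::component_wise(dof_handler);
const std::vector<types::global_dof_index> dofs_per_block =
  DoFTools::count_dofs_per_fe_block(dof_handler);
BlockVector<double> solution(dofs_per_block); // one vector block per FE block
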
More information on this topic can be found in the documentation of FESystem, the Handling vector valued problems topic and the tutorial programs referenced therein.

Selecting blocks: Many functions allow you to restrict their operation to certain vector components or blocks. For example, this is the case for the functions that interpolate boundary values: one may want to only interpolate the boundary values for the velocity block of a finite element field but not the pressure block. The way to do this is by passing a BlockMask argument to such functions, see the block mask entry of this glossary.

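A hedged sketch of such a restricted operation (dof_handler, fe, and the space dimension dim are assumed from context; note that the masked variants of these interpolation functions take a ComponentMask, from which the FiniteElement class can also derive the corresponding BlockMask):

const FEValuesExtractors::Vector velocities(0);
std::map<types::global_dof_index, double> boundary_values;
VectorTools::interpolate_boundary_values(dof_handler,
                                         0, // boundary id
                                         Functions::ZeroFunction<dim>(dim + 1),
                                         boundary_values,
                                         fe.component_mask(velocities));
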
@@ -177,14 +177,14 @@
Boundary form

For a dim-dimensional triangulation in dim-dimensional space, the boundary form is a vector defined on faces. It is the vector product of the image of coordinate vectors on the surface of the unit cell. It is a vector normal to the surface, pointing outwards and having the length of the surface element.

A more general definition would be that (at least up to the length of this vector) it is exactly that vector that is necessary when considering integration by parts, i.e. equalities of the form $\int_\Omega \text{div} \vec \phi = -\int_{\partial\Omega} \vec n \cdot \vec \phi$. Using this definition then also explains what this vector should be in the case of domains (and corresponding triangulations) of dimension dim that are embedded in a space spacedim: in that case, the boundary form is still a vector defined on the faces of the triangulation; it is orthogonal to all tangent directions of the boundary and within the tangent plane of the domain. Note that this is compatible with the case dim==spacedim since there the tangent plane is the entire space ${\mathbb R}^\text{dim}$.

In either case, the length of the vector equals the determinant of the transformation from the reference face to the face of the current cell.

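As a sketch of where this vector appears in practice (assuming an FEFaceValues object fe_face_values reinitialized on a boundary face, and precomputed values phi[q] of a vector field at the face quadrature points): the boundary form, normal vector times surface element, enters a surface integral through normal_vector(q) and JxW(q).

double flux = 0;
for (unsigned int q = 0; q < fe_face_values.n_quadrature_points; ++q)
  flux += phi[q] * fe_face_values.normal_vector(q) * fe_face_values.JxW(q);
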
Boundary indicator

In a Triangulation object, every part of the boundary may be associated with a unique number (of type types::boundary_id) that is used to determine what kinds of boundary conditions are to be applied to a particular part of a boundary. The boundary is composed of the faces of the cells and, in 3d, the edges of these faces.

By default, all boundary indicators of a mesh are zero, unless you are reading from a mesh file that specifically sets them to something different, or unless you use one of the mesh generation functions in namespace GridGenerator that have a colorize option. A typical piece of code that sets the boundary indicator on part of the boundary to something else would look like this, here setting the boundary indicator to 42 for all faces located at $x=-1$:

for (auto &face : triangulation.active_face_iterators())
  if (face->at_boundary())
    if (face->center()[0] == -1)
      face->set_boundary_id (42);
@@ -257,7 +257,7 @@

Component

When considering systems of equations in which the solution is not just a single scalar function, we say that we have a vector system with a vector-valued solution. For example, the vector solution in the elasticity equation considered in step-8 is $u=(u_x,u_y,u_z)^T$ consisting of the displacements in each of the three coordinate directions. The solution then has three elements. Similarly, the 3d Stokes equation considered in step-22 has four elements: $u=(v_x,v_y,v_z,p)^T$. We call the elements of the vector-valued solution components in deal.II. To be well-posed, for the solution to have $n$ components, there need to be $n$ partial differential equations to describe them. This concept is discussed in great detail in the Handling vector valued problems topic.

In finite element programs, one frequently wants to address individual elements (components) of this vector-valued solution, or sets of components. For example, we do this extensively in step-8, and a lot of documentation is also provided in the topic on Handling vector valued problems. If you are thinking only in terms of the partial differential equation (not in terms of its discretization), then the concept of components is the natural one.

On the other hand, when talking about finite elements and degrees of freedom, components are not always the correct concept because components are not always individually addressable. In particular, this is the case for non-primitive finite elements. Similarly, one may not always want to address individual components but rather sets of components — e.g. all velocity components together, and separate from the pressure in the Stokes system, without further splitting the velocities into their individual components. In either case, the correct concept to think in is that of a block. Since each component, if individually addressable, is also a block, thinking in terms of blocks is most frequently the better strategy.

For a given finite element, the number of components can be queried using the FiniteElementData::n_components() function, and you can find out which vector components are nonzero for a given finite element shape function using FiniteElement::get_nonzero_components(). The values and gradients of individual components of a shape function (if the element is primitive) can be queried using the FiniteElement::shape_value_component() and FiniteElement::shape_grad_component() functions on the reference cell. The FEValues::shape_value_component() and FEValues::shape_grad_component() functions do the same on a real cell. See also the documentation of the FiniteElement and FEValues classes.

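A short sketch of these queries for a vector-valued element (assuming the usual deal.II headers and using namespace dealii;):

FESystem<3> fe(FE_Q<3>(2), 3,  // three displacement components
               FE_Q<3>(1), 1); // plus one scalar field
std::cout << fe.n_components() << std::endl; // prints 4
// the components in which shape function 0 is nonzero on the reference cell:
const ComponentMask nonzero = fe.get_nonzero_components(0);
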
@@ -279,7 +279,7 @@

would result in a mask [true, true, false] in 2d. Of course, in 3d, the result would be [true, true, true, false].

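A sketch of the kind of call the text alludes to (element and extractor names are illustrative): selecting the velocity part of a 2d Taylor-Hood element yields exactly this mask.

FESystem<2> stokes_fe(FE_Q<2>(2), 2,  // two velocity components
                      FE_Q<2>(1), 1); // one pressure component
const FEValuesExtractors::Vector velocities(0);
const ComponentMask mask = stokes_fe.component_mask(velocities);
// mask now represents [true, true, false]
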
Note
Just as one can think of composed elements as being made up of components or blocks, there are component masks (represented by the ComponentMask class) and block masks (represented by the BlockMask class). The FiniteElement class has functions that convert between the two kinds of objects.
Not all component masks actually make sense. For example, if you have a FE_RaviartThomas object in 2d, then it doesn't make any sense to have a component mask of the form [true, false] because you try to select individual vector components of a finite element where each shape function has both $x$ and $y$ velocities. In essence, while you can of course create such a component mask, there is nothing you can do with it.
Compressing distributed vectors and matrices

For parallel computations, deal.II uses the vector and matrix classes defined in the PETScWrappers and TrilinosWrappers namespaces. When running programs in parallel using MPI, these classes only store a certain number of rows or elements on the current processor, whereas the rest of the vector or matrix is stored on the other processors that belong to our MPI universe. This presents a certain problem when you assemble linear systems: we add elements to the matrix and right hand side vectors that may or may not be stored locally. Sometimes, we may also want to just set an element, not add to it.

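A minimal sketch of the resulting assembly pattern (assuming a PETScWrappers::MPI::SparseMatrix named system_matrix; the indices i, j and the value are illustrative):

// inside the assembly loop; this may target a row stored on another rank:
system_matrix.add(i, j, local_contribution);
// after assembly, as a collective call on all ranks:
system_matrix.compress(VectorOperation::add);
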
@@ -321,9 +321,9 @@

Degree of freedom

The term "degree of freedom" (often abbreviated as "DoF") is commonly used in the finite element community to indicate two slightly different, but related things. The first is that we'd like to represent the finite element solution as a linear combination of shape functions, in the form $u_h(\mathbf{x}) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf{x})$. Here, $U_j$ is a vector of expansion coefficients. Because we don't know their values yet (we will compute them as the solution of a linear or nonlinear system), they are called "unknowns" or "degrees of freedom". The second meaning of the term can be explained as follows: A mathematical description of the finite element problem is often to say that we are looking for a finite dimensional function $u_h \in V_h$ that satisfies some set of equations (e.g. $a(u_h,\varphi_h)=(f,\varphi_h)$ for all test functions $\varphi_h\in V_h$). In other words, all we say here is that the solution needs to lie in some space $V_h$. However, to actually solve this problem on a computer we need to choose a basis of this space; this is the set of shape functions $\varphi_j(\mathbf{x})$ we have used above in the expansion of $u_h(\mathbf x)$ with coefficients $U_j$. There are of course many bases of the space $V_h$, but we will specifically choose the one that is described by the finite element functions that are traditionally defined locally on the cells of the mesh. Describing "degrees of freedom" in this context requires us to simply enumerate the basis functions of the space $V_h$. For $Q_1$ elements this means simply enumerating the vertices of the mesh in some way, but for higher order elements one also has to enumerate the shape functions that are associated with edges, faces, or cell interiors of the mesh. The class that provides this enumeration of the basis functions of $V_h$ is called DoFHandler. The process of enumerating degrees of freedom is referred to as "distributing DoFs" in deal.II.

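A minimal sketch of this enumeration process (assuming the usual deal.II headers and using namespace dealii;):

Triangulation<2> triangulation;
GridGenerator::hyper_cube(triangulation);
triangulation.refine_global(3);

FE_Q<2>       fe(1); // Q_1: one shape function per vertex
DoFHandler<2> dof_handler(triangulation);
dof_handler.distribute_dofs(fe); // enumerate the basis of V_h
// the coefficient vector U then has dof_handler.n_dofs() entries
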
Direction flags
@@ -345,7 +345,7 @@
Distorted cells

A distorted cell is a cell for which the mapping from the reference cell to the real cell has a Jacobian whose determinant is non-positive somewhere in the cell. Typically, we only check the sign of this determinant at the vertices of the cell. The function GeometryInfo::alternating_form_at_vertices computes these determinants at the vertices.

By way of example, if all of the determinants are of roughly equal value and on the order of $h^\text{dim}$ then the cell is well-shaped. For example, a square cell or face has determinants equal to $h^\text{dim}$ whereas a strongly sheared parallelogram has a determinant much smaller. Similarly, a cell with very unequal edge lengths will have widely varying determinants. Conversely, a pinched cell in which the location of two or more vertices is collapsed to a single point has a zero determinant at this location. Finally, an inverted or twisted cell in which the location of two vertices is out of order will have negative determinants.

The following two images show a well-formed, a pinched, and a twisted cell for both 2d and 3d:

@@ -384,19 +384,19 @@

Generalized support points

"Generalized support points" are, as the name suggests, a generalization of support points. The latter are used to describe that a finite element simply interpolates values at individual points (the "support points"). If we call these points $\hat{\mathbf{x}}_i$ (where the hat indicates that these points are defined on the reference cell $\hat{K}$), then one typically defines shape functions $\varphi_j(\mathbf{x})$ in such a way that the nodal functionals $\Psi_i[\cdot]$ simply evaluate the function at the support point, i.e., that $\Psi_i[\varphi]=\varphi(\hat{\mathbf{x}}_i)$, and the basis is chosen so that $\Psi_i[\varphi_j]=\delta_{ij}$ where $\delta_{ij}$ is the Kronecker delta function. This leads to the common Lagrange elements.

(In the vector valued case, the only other piece of information besides the support points $\hat{\mathbf{x}}_i$ that one needs to provide is the vector component $c(i)$ to which the $i$th node functional corresponds, so that $\Psi_i[\varphi]=\varphi(\hat{\mathbf{x}}_i)_{c(i)}$.)

On the other hand, there are other kinds of elements that are not defined this way. For example, for the lowest order Raviart-Thomas element (see the FE_RaviartThomas class), the node functional evaluates not individual components of a vector-valued finite element function with dim components, but the normal component of this vector: $\Psi_i[\varphi] = \varphi(\hat{\mathbf{x}}_i) \cdot \mathbf{n}_i$, where the $\mathbf{n}_i$ are the normal vectors to the face of the cell on which $\hat{\mathbf{x}}_i$ is located. In other words, the node functional is a linear combination of the components of $\varphi$ when evaluated at $\hat{\mathbf{x}}_i$. Similar things happen for the BDM, ABF, and Nedelec elements (see the FE_BDM, FE_ABF, FE_Nedelec classes).

In these cases, the element does not have support points because it is not purely interpolatory; however, some kind of interpolation is still involved when defining shape functions as the node functionals still require point evaluations at special points $\hat{\mathbf{x}}_i$. In these cases, we call the points generalized support points.

Finally, there are elements that still do not fit into this scheme. For example, some hierarchical basis functions (see, for example, the FE_Q_Hierarchical element) are defined so that the node functionals are moments of finite element functions, $\Psi_i[\varphi] = \int_{\hat{K}} \varphi(\hat{\mathbf{x}}) \, {\hat{x}_1}^{p_1(i)} {\hat{x}_2}^{p_2(i)}$ in 2d, and similarly for 3d, where the $p_d(i)$ are the order of the moment described by shape function $i$. Some other elements use moments over edges or faces. In all of these cases, node functionals are not defined through interpolation at all, and these elements then have neither support points, nor generalized support points.

geometry paper
@@ -506,47 +506,47 @@
Lumped mass matrix

The mass matrix is a matrix of the form

\begin{align*}
       M_{ij} = \int_\Omega \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx,
\end{align*}

It frequently appears in the solution of time dependent problems where, if one uses an explicit time stepping method, it then leads to the need to solve problems of the form

\begin{align*}
       MU^n = MU^{n-1} + k_n BU^{n-1},
\end{align*}

in time step $n$, where $U^n$ is the solution to be computed, $U^{n-1}$ is the known solution from the previous time step, and $B$ is a matrix related to the differential operator in the PDE. $k_n$ is the size of the time step. A similar linear system of equations also arises out of the discretization of second-order differential equations.

The presence of the matrix $M$ on the left side is a nuisance because, even though we have used an explicit time stepping method, we still have to solve a linear system in each time step. It would be much preferable if the matrix were diagonal. "Lumping" the mass matrix is a strategy to replace $M$ by a matrix $M_\text{diagonal}$ that actually is diagonal, yet does not destroy the accuracy of the resulting solution.

Historically, mass lumping was performed by adding the elements of a row together and setting the diagonal entries of $M_\text{diagonal}$ to that sum. This works for $Q_1$ and $P_1$ elements, for example, and can be understood mechanically by replacing the continuous medium we are discretizing by one where the continuous mass distribution is replaced by one where (finite amounts of) mass are located only at the nodes. That is, we are "lumping together" the mass of an element at its vertices, thus giving rise to the name "lumped mass matrix". A more mathematical perspective is to compute the integral above for $M_{ij}$ via special quadrature rules; in particular, we replace the computation of

\begin{align*}
       M_{ij} = \int_\Omega \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx
              = \sum_K \int_K \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx,
\end{align*}

by quadrature

\begin{align*}
        (M_{\text{diagonal}})_{ij} = \sum_K \sum_q \varphi_i(\mathbf x_q^K) \varphi_j(\mathbf x_q^K)
        |K| w_q,
     \end{align*}

where we choose the quadrature points as the nodes at which the shape functions are defined. If we order the quadrature points in the same way as the shape functions, then

\begin{align*}
        \varphi_i(\mathbf x_q^K) = \delta_{iq},
     \end{align*}

and consequently

\begin{align*}
        (M_{\text{diagonal}})_{ij} = \delta_{ij} \sum_{K, \text{supp}\varphi_i \cap K \neq \emptyset} |K| w_i,
     \end{align*}


where the sum extends over those cells on which $\varphi_i$ is nonzero. The so-computed mass matrix is therefore diagonal.


Whether or not this particular choice of quadrature formula is sufficient to retain the convergence rate of the discretization is a separate question. For the usual $Q_k$ finite elements (implemented by FE_Q and FE_DGQ), the appropriate quadrature formulas are of QGaussLobatto type. Mass lumping can also be done with FE_SimplexP_Bubbles, for example, if appropriate quadrature rules are chosen.


For an example of where lumped mass matrices play a role, see step-69.
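Translated into deal.II terms, the quadrature-based construction above fits in a few lines. The following is a minimal sketch (not from the glossary itself; dof_handler and sparsity are assumed to be set up as in the usual tutorial programs): the QGaussLobatto points coincide with the FE_Q support points, so the mass matrix assembled this way comes out diagonal.

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/numerics/matrix_tools.h>

const FE_Q<dim>          fe(2);
const QGaussLobatto<dim> nodal_quadrature(fe.degree + 1);

SparseMatrix<double> lumped_mass_matrix;
lumped_mass_matrix.reinit(sparsity);

// With the quadrature points placed at the nodes, phi_i(x_q) = delta_iq and
// all off-diagonal contributions vanish, as derived above.
MatrixCreator::create_mass_matrix(dof_handler, nodal_quadrature, lumped_mass_matrix);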

Manifold indicator

Every object that makes up a Triangulation (cells, faces, edges, etc.) is associated with a unique number (of type types::manifold_id) that is used to identify which manifold object is responsible for generating new points when the mesh is refined.

By default, all manifold indicators of a mesh are set to numbers::flat_manifold_id. A typical piece of code that sets the manifold indicator on an object to something else would look like this, here setting the manifold indicator to 42 for all cells whose center has an $x$ component less than zero:

for (auto &cell : triangulation.active_cell_iterators())
  if (cell->center()[0] < 0)
    cell->set_manifold_id(42);
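Setting the indicator is only half of the story; a sketch of the companion step (SphericalManifold is chosen purely for illustration) is to register a Manifold object under the same number, which the Triangulation then consults when creating new points:

#include <deal.II/grid/manifold_lib.h>

// Attach the manifold description that indicator 42 now refers to.
triangulation.set_manifold(42, SphericalManifold<dim>());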
Mass matrix

The "mass matrix" is a matrix of the form

\begin{align*}
        M_{ij} = \int_\Omega \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx,
     \end{align*}


possibly with a coefficient inside the integral, and where $\varphi_i(\mathbf x)$ are the shape functions of a finite element. The origin of the term refers to the fact that in structural mechanics (where the finite element method originated), one often starts from the elastodynamics (wave) equation

\begin{align*}
        \rho \frac{\partial^2 u}{\partial t^2}
        -\nabla \cdot C \nabla u = f.
     \end{align*}

/usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html differs:

step-47

Solving the fourth-order biharmonic equation using the $C^0$ Interior Penalty (C0IP) method.
Keywords: FEInterfaceValues

/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html differs:
  •
    Improved: The FEValuesViews objects that one gets when writing things like fe_values[velocities] (see Handling vector valued problems) have become a lot smarter. They now compute a significant amount of data at creation time, rather than on the fly. This means that creating such objects becomes more expensive but using them is cheaper. To offset this cost, FEValuesBase objects now create all possible FEValuesViews objects at creation time, rather than whenever you do things like fe_values[velocities], and simply return a reference to a pre-generated object. This turns an $O(N)$ effort into an $O(1)$ effort, where $N$ is the number of cells.
    (WB 2008/12/10)

    /usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html differs:
  •
    New: There are new functions FullMatrix::cholesky and FullMatrix::outer_product. FullMatrix::cholesky finds the Cholesky decomposition of a matrix in lower triangular form. FullMatrix::outer_product calculates *this $= VW^T$ where $V$ and $W$ are vectors.
    (Jean Marie Linhart 2009/07/27)

    /usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html differs:

  •
    New: There is now a new class Functions::InterpolatedTensorProductGridData that can be used to (bi-/tri-)linearly interpolate data given on a tensor product mesh of $x$ (and $y$ and $z$) values, for example to evaluate experimentally determined coefficients, or to assess the accuracy of a solution by comparing with a solution generated by a different code and written in gridded data. There is also a new class Functions::InterpolatedUniformGridData that can perform the same task more efficiently if the data is stored on meshes that are uniform in each coordinate direction.
    (Wolfgang Bangerth, 2013/12/20)

    /usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_2_1_and_8_3.html differs:

  •
    New: The VectorTools::integrate_difference() function can now also compute the $H_\text{div}$ seminorm, using the VectorTools::Hdiv_seminorm argument.
    (Zhen Tao, Arezou Ghesmati, Wolfgang Bangerth, 2015/04/17)

    /usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html differs:

  •
    Improved: GridGenerator::hyper_shell() in 3d now supports more n_cells options. While previously only 6, 12, or 96 cells were possible, the function now supports any number of the kind $6 \times 2^m$ with $m$ a non-negative integer. The new cases $m=2,3$ and $m\geq 5$ correspond to refinement in the azimuthal direction of the 6 or 12 cell case with a single mesh layer in radial direction, and are intended for shells that are thin and should be given more resolution in azimuthal direction.
    (Martin Kronbichler, 2020/04/07)


  •
    Improved: The additional roots of the HermiteLikeInterpolation with degree $p$ greater than four have been switched to the roots of the Jacobi polynomial $P^{(4,4)}_{p-3}$, making the interior bubble functions $L_2$ orthogonal and improving the conditioning of interpolation slightly.
    (Martin Kronbichler, 2019/07/12)

    /usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html differs:

    The algorithms used in the implementation of this class are described in some detail in the hp-paper. There is also a significant amount of documentation on how to use this class in the Constraints on degrees of freedom topic.

    Description of constraints

    Each "line" in objects of this class corresponds to one constrained degree of freedom, with the number of the line being i, entered by using add_line() or add_lines(). The entries in this line are pairs of the form (j,aij), which are added by add_entry() or add_entries(). The organization is essentially a SparsityPattern, but with only a few lines containing nonzero elements, and therefore no data wasted on the others. For each line, which has been added by the mechanism above, an elimination of the constrained degree of freedom of the form

\[
  x_i = \sum_j a_{ij} x_j + b_i
\]

    is performed, where $b_i$ is optional and set by set_inhomogeneity(). Thus, if a constraint is formulated for instance as a zero mean value of several degrees of freedom, one of the degrees has to be chosen to be eliminated.

    Note that the constraints are linear in the $x_i$, and that there might be a constant (non-homogeneous) term in the constraint. This is exactly the form we need for hanging node constraints, where we need to constrain one degree of freedom in terms of others. There are other conditions of this form possible, for example for implementing mean value conditions as is done in the step-11 tutorial program. The name of the class stems from the fact that these constraints can be represented in matrix form as $X x = b$, and this object then describes the matrix $X$ and the vector $b$. The most frequent way to create/fill objects of this type is using the DoFTools::make_hanging_node_constraints() function. The use of these objects is first explained in step-6.


    Add a constraint to this object. This function adds a constraint of the form

\[
  x_i = \sum_{j=1}^n w_j x_{k_j} + b
\]

    where $i$ is the number of the degree of freedom to be constrained and is provided by the constrained_dofs argument. The weights $w_j$ and indices $k_j$ are provided as pairs in the dependencies argument, and the inhomogeneity $b$ is provided by the last argument.

    As an example, if you want to add the constraint

\[
   x_{42} = 0.5 x_{12} + 0.5 x_{36} + 27
\]

    you would call this function as follows:

    constraints.add_constraint (42, {{12, 0.5}, {36, 0.5}}, 27.0);

    On the other hand, if (as one often wants to) you need a constraint of the kind

\[
   x_{42} = 27
\]

    you would call this function as follows:

    constraints.add_constraint (42, {}, 27.0);

    If you want to constrain a degree of freedom to zero, i.e., require that

\[
   x_{42} = 0
\]

    you would call this function as follows:

    constraints.add_constraint (42, {}, 0.0);

    That said, this special case can be achieved in a more obvious way by calling

    constraints.constrain_dof_to_zero (42);

    Constrain the given degree of freedom to be zero, i.e., require a constraint like

\[
   x_{42} = 0.
\]

    Calling this function is equivalent to, but more readable than, saying

    constraints.add_constraint (42, {}, 0.0);

    It is not an error to call this function more than once on the same degree of freedom, but it is an error to call this function on a degree of freedom that has previously been constrained to either a different value than zero, or to a linear combination of degrees of freedom via the add_constraint() function.

    Add an entry to a given line. In other words, this function adds a term $a_{ij} x_j$ to the constraints for the $i$th degree of freedom.

    If an entry with the same indices as the one this function call denotes already exists, then this function simply returns provided that the value of the entry is the same. Thus, it does no harm to enter a constraint twice.

    Parameters
    [in] constrained_dof_index: The index $i$ of the degree of freedom that is being constrained.
    [in] column: The index $j$ of the degree of freedom being entered into the constraint for degree of freedom $i$.
    [in] weight: The factor $a_{ij}$ that multiplies $x_j$.
    Set an inhomogeneity to the constraint for a degree of freedom. In other words, it adds a constant $b_i$ to the constraint for degree of freedom $i$. For this to work, you need to call add_line() first for the given degree of freedom.

    Parameters
    [in] constrained_dof_index: The index $i$ of the degree of freedom that is being constrained.
    [in] value: The right hand side value $b_i$ for the constraint on the degree of freedom $i$.
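    Taken together, add_line(), add_entry(), and set_inhomogeneity() give an incremental way of stating the constraint used in the add_constraint() example further up. A brief sketch:

    // Incrementally state x_42 = 0.5 x_12 + 0.5 x_36 + 27:
    AffineConstraints<double> constraints;
    constraints.add_line(42);
    constraints.add_entry(42, 12, 0.5);
    constraints.add_entry(42, 36, 0.5);
    constraints.set_inhomogeneity(42, 27.0);
    constraints.close();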

    Close the filling of entries. Since the lines of a matrix of this type are usually filled in an arbitrary order and since we do not want to use associative containers to store the lines, we need to sort the lines and within the lines the columns before usage of the matrix. This is done through this function.

    Also, zero entries are discarded, since they are not needed.

    After closing, no more entries are accepted. If the object was already closed, then this function returns immediately.

    This function also resolves chains of constraints. For example, degree of freedom 13 may be constrained to $u_{13} = \frac{u_3}{2} + \frac{u_7}{2}$ while degree of freedom 7 is itself constrained as $u_{7} = \frac{u_2}{2} + \frac{u_4}{2}$. Then, the resolution will be that $u_{13} = \frac{u_3}{2} + \frac{u_2}{4} + \frac{u_4}{4}$. Note, however, that cycles in this graph of constraints are not allowed, i.e., for example $u_4$ may not itself be constrained, directly or indirectly, to $u_{13}$ again.
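    The chain from the example can be reproduced in code; a small sketch:

    AffineConstraints<double> constraints;
    constraints.add_line(7);                // u_7 = u_2/2 + u_4/2
    constraints.add_entry(7, 2, 0.5);
    constraints.add_entry(7, 4, 0.5);
    constraints.add_line(13);               // u_13 = u_3/2 + u_7/2
    constraints.add_entry(13, 3, 0.5);
    constraints.add_entry(13, 7, 0.5);
    constraints.close();                    // now u_13 = u_3/2 + u_2/4 + u_4/4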


    This function provides a "view" into a constraint object. Specifically, given a "mask" index set that describes which constraints we are interested in, it returns an AffineConstraints object that contains only those constraints that correspond to degrees of freedom that are listed in the mask, with indices shifted so that they correspond to the position within the mask. This process is the same as how IndexSet::get_view() computes the shifted indices. The function is typically used to extract from an AffineConstraints object corresponding to a DoFHandler only those constraints that correspond to a specific variable (say, to the velocity in a Stokes system) so that the resulting AffineConstraints object can be applied to a single block of a block vector of solutions; in this case, the mask would be the index set of velocity degrees of freedom, as a subset of all degrees of freedom.

    This function can only work if the degrees of freedom selected by the mask are constrained only against other degrees of freedom that are listed in the mask. In the example above, this means that constraints for the selected velocity degrees of freedom are only against other velocity degrees of freedom, but not against any pressure degrees of freedom. If that is not so, an assertion will be triggered.

    A typical case where this function is useful is as follows. Say, you have a block linear system in which you have blocks corresponding to variables $(u,p,T,c)$ (which you can think of as velocity, pressure, temperature, and chemical composition – or whatever other kind of problem you are currently considering in your own work). Let's assume we have developed a linear solver or preconditioner that first solves the coupled $u$- $T$ system, and once that is done, solves the $p$- $c$ system. In this case, it is often useful to set up block vectors with only two components corresponding to the $u$ and $T$ components, and later for only the $p$- $c$ components of the solution. As part of this, you will want to apply constraints (using the distribute() function of this class) to only the 2-block vector, but for this you need to obtain an AffineConstraints object that represents only those constraints that correspond to the variables in question, and in the order in which they appear in the 2-block vector rather than in global 4-block vectors. This function allows you to extract such an object corresponding to a subset of constraints by applying a mask to the global constraints object that corresponds to the variables we're currently interested in. For the $u$- $T$ system, we need a mask that contains all indices of $u$ degrees of freedom as well as all indices of $T$ degrees of freedom.
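    For a Stokes-like situation, the call sequence might look as follows (a sketch; dof_handler, fe, and constraints are assumed to exist, and the velocity extractor is illustrative):

    const FEValuesExtractors::Vector velocities(0);
    const IndexSet velocity_dofs =
      DoFTools::extract_dofs(dof_handler, fe.component_mask(velocities));

    // Constraints on velocity dofs only, renumbered to positions in the mask:
    const AffineConstraints<double> velocity_constraints =
      constraints.get_view(velocity_dofs);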


    Print the constraints represented by the current object to the given stream.

    For each constraint of the form

\[
  x_{42} = 0.5 x_2 + 0.25 x_{14} + 2.75
\]

    this function will write a sequence of lines that look like this:

    42 2 : 0.5
    42 14 : 0.25

    This function takes a matrix of local contributions (local_matrix) corresponding to the degrees of freedom indices given in local_dof_indices and distributes them to the global matrix. In other words, this function implements a scatter operation. In most cases, these local contributions will be the result of an integration over a cell or face of a cell. However, as long as local_matrix and local_dof_indices have the same number of elements, this function is happy with whatever it is given.

    In contrast to the similar function in the DoFAccessor class, this function also takes care of constraints, i.e. if one of the elements of local_dof_indices belongs to a constrained node, then rather than writing the corresponding element of local_matrix into global_matrix, the element is distributed to the entries in the global matrix to which this particular degree of freedom is constrained.

    With this scheme, we never write into rows or columns of constrained degrees of freedom. In order to make sure that the resulting matrix can still be inverted, we need to do something with the diagonal elements corresponding to constrained nodes. Thus, if a degree of freedom in local_dof_indices is constrained, we distribute the corresponding entries in the matrix, but also add the absolute value of the diagonal entry of the local matrix to the corresponding entry in the global matrix. Assuming the discretized operator is positive definite, this guarantees that the diagonal entry is always non-zero, positive, and of the same order of magnitude as the other entries of the matrix. On the other hand, when solving a source problem $Au=f$ the exact value of the diagonal element is not important, since the value of the respective degree of freedom will be overwritten by the distribute() call later on anyway.

    Note
    The procedure described above adds an unforeseeable number of artificial eigenvalues to the spectrum of the matrix. Therefore, it is recommended to use the equivalent function with two local index vectors in such a case.

    By using this function to distribute local contributions to the global object, one saves the call to the condense function after the vectors and matrices are fully assembled.

    Note
    This function in itself is thread-safe, i.e., it works properly also when several threads call it simultaneously. However, the function call is only thread-safe if the underlying global matrix allows for simultaneous access and the access is not to rows with the same global index at the same time. This needs to be made sure on the caller's side. There is no locking mechanism inside this method to prevent data races.
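    In practice this function is the core of the usual assembly loop; a sketch with the customary tutorial names assumed:

    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        fe_values.reinit(cell);
        // ... integrate cell_matrix and cell_rhs on this cell ...
        cell->get_dof_indices(local_dof_indices);
        constraints.distribute_local_to_global(
          cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);
      }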

    This function does almost the same as the function above but can treat general rectangular matrices. The main difference to achieve this is that the diagonal entries in constrained rows are left untouched instead of being filled with arbitrary values.

    Since the diagonal entries corresponding to eliminated degrees of freedom are not set, the result may have a zero eigenvalue, if applied to a square matrix. This has to be considered when solving the resulting problems. For solving a source problem $Au=f$, it is possible to set the diagonal entry after building the matrix by a piece of code of the form

    for (unsigned int i = 0; i < matrix.m(); ++i)
      if (constraints.is_constrained(i))
        matrix.diag_element(i) = 1.;
    Given a vector, set all constrained degrees of freedom to values so that the constraints are satisfied. For example, if the current object stores the constraint $x_3=\frac 12 x_1 + \frac 12 x_2$, then this function will read the values of $x_1$ and $x_2$ from the given vector and set the element $x_3$ according to these constraints. Similarly, if the current object stores the constraint $x_{42}=208$, then this function will set the 42nd element of the given vector to 208.

    Note
    If this function is called with a parallel vector vec, then the vector must not contain ghost elements.
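    A sketch of the usual call site (solver and preconditioner names assumed): solve the condensed system first, then let distribute() fill in the constrained entries.

    solver.solve(system_matrix, solution, system_rhs, preconditioner);
    constraints.distribute(solution);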
    /usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html differs:

    For fixed theta, the Crank-Nicolson scheme is the only second order scheme. Nevertheless, further stability may be achieved by choosing theta larger than ½, thereby introducing a first order error term. In order to avoid a loss of convergence order, the adaptive theta scheme can be used, where theta=½+c dt.

    Assume that we want to solve the equation u' + F(u) = 0 with a step size k. A step of the theta scheme can be written as

\[
   M u_{n+1} + \theta k F(u_{n+1})  = M u_n - (1-\theta)k F(u_n).
\]

    Here, $M$ is the mass matrix. We see that the right hand side amounts to an explicit Euler step with modified step size in weak form (up to inversion of $M$). The left hand side corresponds to an implicit Euler step with modified step size (right hand side given). Thus, the implementation of the theta scheme will use two Operator objects, one for the explicit, one for the implicit part. Each of these will use its own TimestepData to account for the modified step sizes (and different times if the problem is not autonomous). Note that once the explicit part has been computed, the left hand side actually constitutes a linear or nonlinear system which has to be solved.
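    For orientation, the classical special cases implied by the formula above (a summary, not part of the original page) read:

\[
  \theta = 0:\; M u_{n+1} = M u_n - k F(u_n) \;\text{(explicit Euler)}, \qquad
  \theta = 1:\; M u_{n+1} + k F(u_{n+1}) = M u_n \;\text{(implicit Euler)},
\]

\[
  \theta = \tfrac{1}{2}:\; M u_{n+1} + \tfrac{k}{2} F(u_{n+1})
         = M u_n - \tfrac{k}{2} F(u_n) \;\text{(Crank-Nicolson)}.
\]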

    Usage AnyData

    Now we need to study the application of the implicit and explicit operator. We assume that the pointer matrix points to the matrix created in the main program (the constructor did this for us). Here, we first get the time step size from the AnyData object that was provided as input. Then, if we are in the first step or if the timestep has changed, we fill the local matrix $m$, such that with the given matrix $M$, it becomes

\[ m = I - \Delta t M. \]

    After we have worked off the notifications, we clear them, such that the matrix is only generated when necessary.

    void Explicit::operator()(AnyData &out, const AnyData &in)

    The operator computing the explicit part of the scheme. This will receive in its input data the value at the current time with name "Current time solution". It should obtain the current time and time step size from explicit_data().

    Its return value is $ Mu+cF(u) $, where $u$ is the current state vector, $M$ the mass matrix, $F$ the operator in space and $c$ is the adjusted time step size $(1-\theta) \Delta t$.

    Definition at line 415 of file theta_timestepping.h.


    The operator solving the implicit part of the scheme. It will receive in its input data the vector "Previous time". Information on the timestep should be obtained from implicit_data().

    Its return value is the solution $u$ of $Mu - cF(u) = f$, where $f$ is the dual space vector found in the "Previous time" entry of the input data, $M$ the mass matrix, $F$ the operator in space, and $c$ is the adjusted time step size $ \theta \Delta t$.

    Definition at line 427 of file theta_timestepping.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html differs:

    Detailed Description

    template<int dim>
    class AnisotropicPolynomials< dim >

    Anisotropic tensor product of given polynomials.

    Given one-dimensional polynomials $P^x_1(x), P^x_2(x), \ldots$ in $x$-direction, $P^y_1(y), P^y_2(y), \ldots$ in $y$-direction, and so on, this class generates polynomials of the form $Q_{ijk}(x,y,z) = P^x_i(x)P^y_j(y)P^z_k(z)$. (With obvious generalization if dim is in fact only 2. If dim is in fact only 1, then the result is simply the same set of one-dimensional polynomials passed to the constructor.)

    If the elements of each set of base polynomials are mutually orthogonal on the interval $[-1,1]$ or $[0,1]$, then the tensor product polynomials are orthogonal on $[-1,1]^d$ or $[0,1]^d$, respectively.

    The resulting dim-dimensional tensor product polynomials are ordered as follows: We iterate over the $x$ coordinates running fastest, then the $y$ coordinate, etc. For example, for dim==2, the first few polynomials are thus $P^x_1(x)P^y_1(y)$, $P^x_2(x)P^y_1(y)$, $P^x_3(x)P^y_1(y)$, ..., $P^x_1(x)P^y_2(y)$, $P^x_2(x)P^y_2(y)$, $P^x_3(x)P^y_2(y)$, etc.

    Definition at line 321 of file tensor_product_polynomials.h.

    Constructor & Destructor Documentation

    Each tensor product polynomial $p_i$ is a product of one-dimensional polynomials in each space direction. Compute the indices of these one-dimensional polynomials for each space direction, given the index i.

    Definition at line 713 of file tensor_product_polynomials.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html differs:

    Detailed Description

    Interface for using ARPACK. ARPACK is a collection of Fortran77 subroutines designed to solve large scale eigenvalue problems. Here we interface to the routines dnaupd and dneupd of ARPACK. If the operator is specified to be symmetric we use the symmetric interface dsaupd and dseupd of ARPACK instead. The package is designed to compute a few eigenvalues and corresponding eigenvectors of a general n by n matrix A. It is most appropriate for large sparse matrices A.

    In this class we make use of the method applied to the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively.

    The ArpackSolver can be used in application codes with serial objects in the following way:

    solver.solve(A, B, OP, lambda, x, size_of_spectrum);
    for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable size_of_spectrum tells ARPACK the number of eigenvector/eigenvalue pairs to solve for. Here, lambda is a vector that will contain the eigenvalues computed, x a vector that will contain the eigenvectors computed, and OP is an inverse operation for the matrix A. Shift and invert transformation around zero is applied.
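    Only the solve() call survives in this extract; a fuller sketch of the setup around it (the tolerance and iteration limit are chosen arbitrarily) would be:

    SolverControl solver_control(1000, 1e-9);
    ArpackSolver  solver(solver_control);
    solver.solve(A, B, OP, lambda, x, size_of_spectrum);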

    Through the AdditionalData the user can specify some of the parameters to be set.

    For further information on how the ARPACK routines dsaupd, dseupd, dnaupd and dneupd work and also how to set the parameters appropriately please take a look into the ARPACK manual.

    Note
    Whenever you eliminate degrees of freedom using AffineConstraints, you generate spurious eigenvalues and eigenvectors. If you make sure that the diagonals of eliminated matrix rows are all equal to one, you get a single additional eigenvalue. But beware that some functions in deal.II set these diagonals to rather arbitrary (from the point of view of eigenvalue problems) values. See also step-36 for an example.
    Solve the generalized eigenspectrum problem $A x=\lambda B x$ by calling the dsaupd and dseupd or dnaupd and dneupd functions of ARPACK.

    The function returns a vector of eigenvalues of length n and a vector of eigenvectors of length n in the symmetric case and of length n+1 in the non-symmetric case. In the symmetric case all eigenvectors are real. In the non-symmetric case complex eigenvalues always occur as complex conjugate pairs. Therefore the eigenvector for an eigenvalue with nonzero complex part is stored by putting the real and the imaginary parts in consecutive real-valued vectors. The eigenvector of the complex conjugate eigenvalue does not need to be stored, since it is just the complex conjugate of the stored eigenvector. Thus, if the last n-th eigenvalue has a nonzero imaginary part, Arpack needs in total n+1 real-valued vectors to store real and imaginary parts of the eigenvectors.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html differs:
    Return a reference to the $i$th element of the range represented by the current object.

    This function is marked as const because it does not change the view object. It may however return a reference to a non-const memory location depending on whether the template type of the class is const or not.

    This function is only allowed to be called if the underlying data is indeed stored in CPU memory.
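    A minimal sketch of such element access through a view:

    #include <deal.II/base/array_view.h>
    #include <vector>

    std::vector<double> storage{1., 2., 3.};
    ArrayView<double>   view = make_array_view(storage);
    view[1] = 4.; // operator[] returns a reference into 'storage'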

    /usr/share/doc/packages/dealii/doxygen/deal.II/classAutoDerivativeFunction.html differs:

    Names of difference formulas.

    Enumerator
    Euler 

    The symmetric Euler formula of second order:

\[
 u'(t) \approx \frac{u(t+h) - u(t-h)}{2h}.
\]

    UpwindEuler 

    The upwind Euler formula of first order:

\[
 u'(t) \approx \frac{u(t) - u(t-h)}{h}.
\]

    FourthOrder 

    The fourth order scheme

\[
 u'(t) \approx \frac{u(t-2h) - 8u(t-h) + 8u(t+h) - u(t+2h)}{12h}.
\]
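    A sketch of selecting one of these formulas (MyFunction is a hypothetical class derived from AutoDerivativeFunction<dim> that only implements value()):

    MyFunction<dim> my_function;
    my_function.set_formula(AutoDerivativeFunction<dim>::FourthOrder);
    my_function.set_h(1e-6); // step width h in the difference quotients above
    // gradient() is now evaluated with the selected formula.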

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html differs:

\[
   (x, y) = c_0 (x_0, y_0) + c_1 (x_1, y_1) + c_2 (x_2, y_2).
\]

    where each value $c_i$ is the relative weight of each vertex (so the centroid is, in 2d, where each $c_i = 1/3$). Since we only consider convex combinations we can rewrite this equation as

\[
   (x, y) = (1 - c_1 - c_2) (x_0, y_0) + c_1 (x_1, y_1) + c_2 (x_2, y_2).
\]

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html differs:

    Detailed Description

    template<typename VectorType>
    class BaseQR< VectorType >

    A base class for thin QR implementations.

    This class and classes derived from it are meant to build $Q$ and $R$ matrices one row/column at a time, i.e., by growing the $R$ matrix from an empty $0\times 0$ matrix to $N\times N$, where $N$ is the number of added column vectors.

    As a consequence, the matrix which has the same number of rows as each vector (i.e. the $Q$ matrix) is stored as a collection of vectors of VectorType.

    Definition at line 43 of file qr.h.

    Member Typedef Documentation

    Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

    Implemented in ImplicitQR< VectorType >, and QR< VectorType >.

    Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implemented in ImplicitQR< VectorType >, and QR< VectorType >.

    Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

    Implemented in ImplicitQR< VectorType >, and QR< VectorType >.

    Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implemented in ImplicitQR< VectorType >, and QR< VectorType >.
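    A sketch of growing a factorization column by column and applying $Q$ (the vectors v1 and v2 are assumed to be of equal size):

    QR<Vector<double>> qr;
    qr.append_column(v1); // R grows from 0x0 to 1x1
    qr.append_column(v2); // ... and on to 2x2

    Vector<double> y(v1.size()), x(2);
    x[0] = 1.;
    qr.multiply_with_Q(y, x); // y = Q x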

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html differs:

    Detailed Description

    BlockIndices represents a range of indices (such as the range $[0,N)$ of valid indices for elements of a vector) and how this one range is broken down into smaller but contiguous "blocks" (such as the velocity and pressure parts of a solution vector). In particular, it provides the ability to translate between global indices and the indices within a block. This class is used, for example, in the BlockVector, BlockSparsityPattern, and BlockMatrixBase classes.

    The information that can be obtained from this class falls into two groups. First, it is possible to query the global size of the index space (through the total_size() member function), and the number of blocks and their sizes (via size() and the block_size() functions).

    Secondly, this class manages the conversion of global indices to the local indices within this block, and the other way around. This is required, for example, when you address a global element in a block vector and want to know within which block this is, and which index within this block it corresponds to. It is also useful if a matrix is composed of several blocks, where you have to translate global row and column indices to local ones.
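    A minimal sketch of the two conversions:

    // Two blocks of sizes 3 and 5, i.e., the index range [0,8):
    const BlockIndices bi(std::vector<types::global_dof_index>{3, 5});

    // Global index 6 lies in block 1 at local index 3 ...
    const std::pair<unsigned int, types::global_dof_index> block_and_index =
      bi.global_to_local(6);

    // ... and the reverse mapping returns 6 again:
    const types::global_dof_index global = bi.local_to_global(1, 3);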

    See also
    Block (linear algebra)
    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html differs:

    and Id_c is the projection to the subspace consisting of all vector entries associated with constrained degrees of freedom.

    This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

\[
   (C^T A C + Id_c) x = C^T (b - A\,k)
\]

    with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

    A detailed explanation of this approach is given in the Constraints on degrees of freedom topic.
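    A sketch of the workflow around these helpers (operator and vector names assumed): build the modified operator and right hand side, solve, then let the constraints object fill in the constrained entries.

    const auto A_mod = constrained_linear_operator(constraints, A);
    const auto b_mod = constrained_right_hand_side(constraints, A, b);
    // Solve A_mod * x = b_mod with a solver of your choice, then:
    // constraints.distribute(x);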


    with

    This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

\[
   (C^T A C + Id_c) x = C^T (b - A\,k)
\]

    with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

    A detailed explanation of this approach is given in the Constraints on degrees of freedom topic.

    @@ -1543,7 +1543,7 @@

Return a LinearOperator that performs the operations associated with the Schur complement. There are two additional helper functions, condense_schur_rhs() and postprocess_schur_solution(), that will likely be needed in order to perform any useful tasks in linear algebra with this operator.

    We construct the definition of the Schur complement in the following way:

    Consider a general system of linear equations that can be decomposed into two major sets of equations:

\begin{eqnarray*}
 \mathbf{K}\mathbf{d} = \mathbf{f}
 \quad \Rightarrow\quad
 \left(\begin{array}{cc}
    A & B \\ C & D
 \end{array}\right)
 \left(\begin{array}{cc}
    x \\ y
 \end{array}\right)
 =
 \left(\begin{array}{cc}
    f \\ g
 \end{array}\right),
\end{eqnarray*}

where $ A,B,C,D $ represent general subblocks of the matrix $ \mathbf{K} $ and, similarly, general subvectors of $ \mathbf{d},\mathbf{f} $ are given by $ x,y,f,g $.

    This is equivalent to the following two statements:

\begin{eqnarray*}
   (1) \quad Ax + By &=& f \\
   (2) \quad Cx + Dy &=& g \quad .
\end{eqnarray*}

Assuming that $ A,D $ are both square and invertible, we could then perform one of two possible substitutions,

\begin{eqnarray*}
   (3) \quad x &=& A^{-1}(f - By) \quad \text{from} \quad (1) \\
   (4) \quad y &=& D^{-1}(g - Cx) \quad \text{from} \quad (2) ,
\end{eqnarray*}

    which amount to performing block Gaussian elimination on this system of equations.

    For the purpose of the current implementation, we choose to substitute (3) into (2)

\begin{eqnarray*}
   C \: A^{-1}(f - By) + Dy &=& g \\
   -C \: A^{-1} \: By + Dy &=& g - C \: A^{-1} \: f \quad .
\end{eqnarray*}

    This leads to the result

\[
   (5) \quad (D - C\: A^{-1} \:B)y  = g - C \: A^{-1} f
       \quad \Rightarrow \quad Sy = g'
\]

with $ S = (D - C\: A^{-1} \:B) $ being the Schur complement and the modified right-hand side vector $ g' = g - C \: A^{-1} f $ arising from the condensation step. Note that for this choice of $ S $, submatrix $ D $ need not be invertible and may thus be the null matrix. Ideally $ A $ should be well-conditioned.

So for any arbitrary vector $ a $, the Schur complement performs the following operation:

\[
   (6) \quad Sa = (D - C \: A^{-1} \: B)a
\]

A typical set of steps needed to solve a linear system (1),(2) would be:

1. Define the inverse matrix A_inv (using inverse_operator()).
2. Define the Schur complement $ S $ (using schur_complement()).
3. Define the iterative inverse matrix $ S^{-1} $ such that (6) holds. It is necessary to use a solver with a preconditioner to compute the approximate inverse operation of $ S $ since we never compute $ S $ directly, but rather the result of its operation. To achieve this, one may again use the inverse_operator() in conjunction with the Schur complement that we've just constructed. Observe that both $ S $ and its preconditioner operate over the same space as $ D $.
4. Perform the pre-processing step on the RHS of (5) using condense_schur_rhs():

   \[
      g' = g - C \: A^{-1} \: f
   \]

5. Solve for $ y $ in (5):

   \[
      y =  S^{-1} g'
   \]

6. Perform the post-processing step from (3) using postprocess_schur_solution():

   \[
      x =  A^{-1} (f - By)
   \]
    @@ -1655,10 +1655,10 @@
    LinearOperator< Domain, Range, BlockPayload::BlockType > inverse_operator(const LinearOperator< Range, Domain, BlockPayload::BlockType > &op, Solver &solver, const Preconditioner &preconditioner)
    PackagedOperation< Domain_1 > postprocess_schur_solution(const LinearOperator< Range_1, Domain_1, Payload > &A_inv, const LinearOperator< Range_1, Domain_2, Payload > &B, const Domain_2 &y, const Range_1 &f)
In the above example, the preconditioner for $ S $ was defined as the preconditioner for $ D $, which is valid since they operate on the same space. However, if $ D $ and $ S $ are too dissimilar, then this may lead to a large number of solver iterations as $ \text{prec}(D) $ is not a good approximation for $ S^{-1} $.

A better preconditioner in such a case would be one that provides a more representative approximation for $ S^{-1} $. One approach is shown in step-22, where $ D $ is the null matrix and the preconditioner for $ S^{-1} $ is derived from the mass matrix over this space.

From another viewpoint, a similar result can be achieved by first constructing an object that represents an approximation for $ S $ wherein the expensive operation, namely $ A^{-1} $, is approximated. Thereafter we construct the approximate inverse operator $ \tilde{S}^{-1} $ which is then used as the preconditioner for computing $ S^{-1} $.

// Construction of approximate inverse of Schur complement
    const auto A_inv_approx = linear_operator(preconditioner_A);
    const auto S_approx = schur_complement(A_inv_approx,B,C,D);
    @@ -1681,8 +1681,8 @@
    // Solve for y
    y = S_inv * rhs;
    x = postprocess_schur_solution (A_inv,B,y,f);
Note that due to the construction of S_inv_approx and subsequently S_inv, there are a pair of nested iterative solvers which could collectively consume a lot of resources. Therefore care should be taken in the choices leading to the construction of the iterative inverse_operators. One might consider the use of an IterationNumberControl (or a similar mechanism) to limit the number of inner solver iterations. This controls the accuracy of the approximate inverse operation $ \tilde{S}^{-1} $ which acts only as the preconditioner for $ S^{-1} $. Furthermore, the preconditioner to $ \tilde{S}^{-1} $, which in this example is $ \text{prec}(D) $, should ideally be computationally inexpensive.

    However, if an iterative solver based on IterationNumberControl is used as a preconditioner then the preconditioning operation is not a linear operation. Here a flexible solver like SolverFGMRES (flexible GMRES) is best employed as an outer solver in order to deal with the variable behavior of the preconditioner. Otherwise the iterative solver can stagnate somewhere near the tolerance of the preconditioner or generally behave erratically. Alternatively, using a ReductionControl would ensure that the preconditioner always solves to the same tolerance, thereby rendering its behavior constant.

    Further examples of this functionality can be found in the test-suite, such as tests/lac/schur_complement_01.cc. The solution of a multi-component problem (namely step-22) using the schur_complement can be found in tests/lac/schur_complement_03.cc.

    See also
    Block (linear algebra)
    @@ -1705,7 +1705,7 @@
Return the number of blocks in a column (i.e., the number of "block rows", or the number $m$, if interpreted as an $m\times n$ block system).

    Definition at line 296 of file block_linear_operator.h.

    @@ -1724,7 +1724,7 @@
Return the number of blocks in a row (i.e., the number of "block columns", or the number $n$, if interpreted as an $m\times n$ block system).

    Definition at line 302 of file block_linear_operator.h.

    @@ -1743,7 +1743,7 @@
Access the block with the given coordinates. This std::function object returns a LinearOperator representing the $(i,j)$-th block of the BlockLinearOperator.

    Definition at line 309 of file block_linear_operator.h.

/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html 2024-12-27 18:24:53.280767133 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html 2024-12-27 18:24:53.284767160 +0000 @@ -1309,7 +1309,7 @@ const BlockVectorType & src

Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    @@ -1757,7 +1757,7 @@
Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.
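    A small usage sketch, assuming a BlockSparseMatrix<double> M and BlockVector<double> objects src, dst (and src2, dst2 partitioned for the transposed product) have already been set up; all names here are illustrative:

    M.vmult(dst, src);        // dst  = M * src
    M.vmult_add(dst, src);    // dst += M * src
    M.Tvmult(dst2, src2);     // dst2 = M^T * src2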

    @@ -1881,7 +1881,7 @@
Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html 2024-12-27 18:24:53.356767654 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html 2024-12-27 18:24:53.360767682 +0000 @@ -954,7 +954,7 @@
Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Definition at line 398 of file block_sparse_matrix.h.

    @@ -1082,7 +1082,7 @@
Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 442 of file block_sparse_matrix.h.

    @@ -2079,7 +2079,7 @@
Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    @@ -2627,7 +2627,7 @@
Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    @@ -2735,7 +2735,7 @@
Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html 2024-12-27 18:24:53.408768012 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html 2024-12-27 18:24:53.412768039 +0000 @@ -767,7 +767,7 @@ const BlockVector< somenumber > & src

Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Definition at line 370 of file block_sparse_matrix_ez.h.

@@ -792,7 +792,7 @@ const BlockVector< somenumber > & src

Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 408 of file block_sparse_matrix_ez.h.

@@ -817,7 +817,7 @@ const BlockVector< somenumber > & src

Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

    Definition at line 390 of file block_sparse_matrix_ez.h.

@@ -842,7 +842,7 @@ const BlockVector< somenumber > & src

Adding Matrix-vector multiplication. Add $M^T*src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add() but takes the transposed matrix.

    Definition at line 428 of file block_sparse_matrix_ez.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html 2024-12-27 18:24:53.488768561 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html 2024-12-27 18:24:53.492768588 +0000 @@ -1825,7 +1825,7 @@
$U = U * V$: scalar product.

    @@ -1851,7 +1851,7 @@
Return the square of the $l_2$-norm.

    @@ -1903,7 +1903,7 @@
Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    @@ -1929,7 +1929,7 @@
Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    @@ -1955,7 +1955,7 @@
Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.
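    A brief sketch of these norms on a small block vector; the block sizes are arbitrary and chosen only for illustration:

    BlockVector<double> v(std::vector<types::global_dof_index>{2, 3});
    v = 1.0;                               // all five entries set to 1
    const double n1   = v.l1_norm();       // sum of absolute values: 5
    const double n2   = v.l2_norm();       // sqrt of sum of squares: sqrt(5)
    const double ninf = v.linfty_norm();   // maximum absolute value: 1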

    @@ -1990,7 +1990,7 @@
    return_value = *this * W;
    void add(const std::vector< size_type > &indices, const std::vector< Number > &values)

    The reason this function exists is that this operation involves less memory transfer than calling the two functions separately on deal.II's vector classes (Vector<Number> and LinearAlgebra::distributed::Vector<double>). This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.
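    A hypothetical sketch of the fused operation this paragraph describes, using plain Vector<double> objects (the BlockVector interface is analogous):

    Vector<double> u(10), V(10), W(10);
    u = 1.0; V = 2.0; W = 3.0;
    // One pass over memory: u += 0.5 * V, then return the scalar product u * W
    const double result = u.add_and_dot(0.5, V, W);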

    @@ -2237,7 +2237,7 @@
$U(0-DIM)+=s$. Addition of s to all components. Note that s is a scalar and not a vector.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html 2024-12-27 18:24:53.544768945 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html 2024-12-27 18:24:53.552769000 +0000 @@ -1257,7 +1257,7 @@
$U = U * V$: scalar product.

    @@ -1277,7 +1277,7 @@
Return the square of the $l_2$-norm.

    @@ -1317,7 +1317,7 @@
Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    @@ -1337,7 +1337,7 @@
Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    @@ -1357,7 +1357,7 @@
Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    @@ -1387,7 +1387,7 @@
    return_value = *this * W;
    void add(const std::vector< size_type > &indices, const std::vector< Number > &values)

    The reason this function exists is that this operation involves less memory transfer than calling the two functions separately on deal.II's vector classes (Vector<Number> and LinearAlgebra::distributed::Vector<double>). This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

    @@ -1587,7 +1587,7 @@
$U(0-DIM)+=s$. Addition of s to all components. Note that s is a scalar and not a vector.

/usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html 2024-12-27 18:24:53.596769302 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html 2024-12-27 18:24:53.600769330 +0000 @@ -179,11 +179,11 @@

    Detailed Description

    template<int spacedim, typename Number = double>
    class BoundingBox< spacedim, Number >

    A class that represents a box of arbitrary dimension spacedim and with sides parallel to the coordinate axes, that is, a region

\[
 [x_0^L, x_0^U] \times ... \times [x_{spacedim-1}^L, x_{spacedim-1}^U],
\]

where $(x_0^L , ..., x_{spacedim-1}^L)$ and $(x_0^U , ..., x_{spacedim-1}^U)$ denote the two vertices (bottom left and top right) which are used to represent the box. The quantities $x_k^L$ and $x_k^U$ denote the "lower" and "upper" bounds of values that are within the box for each coordinate direction $k$.

    Geometrically, a bounding box is thus:

    Bounding boxes are, for example, useful in parallel distributed meshes to give a general description of the owners of each portion of the mesh. More generally, bounding boxes are often used to roughly describe a region of space in which an object is contained; if a candidate point is not within the bounding box (a test that is cheap to execute), then it is not necessary to perform an expensive test whether the candidate point is in fact inside the object itself. Bounding boxes are therefore often used as a first, cheap rejection test before more detailed checks. As such, bounding boxes serve many of the same purposes as the convex hull, for which it is also relatively straightforward to compute whether a point is inside or outside, though not quite as cheap as for the bounding box.
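    A short sketch of the cheap rejection test described above, with illustrative coordinates:

    const BoundingBox<2> box(std::make_pair(Point<2>(0., 0.), Point<2>(1., 2.)));
    const Point<2> candidate(0.5, 3.0);
    if (box.point_inside(candidate))
      {
        // only now run the expensive point-in-object test
      }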

Taking the cross section of a BoundingBox<spacedim> orthogonal to a given direction gives a box in one dimension lower: BoundingBox<spacedim - 1>. In 3d, the 2 coordinates of the cross section of BoundingBox<3> can be ordered in 2 different ways. That is, if we take the cross section orthogonal to the y direction we could either order a 3d-coordinate into a 2d-coordinate as $(x,z)$ or as $(z,x)$. This class uses the second convention, corresponding to the coordinates being ordered cyclically $x \rightarrow y \rightarrow z \rightarrow x \rightarrow ... $ To be precise, if we take a cross section:

    @@ -744,7 +744,7 @@
    Orthogonal to Cross section coordinates ordered as
Returns the indexth vertex of the box. Vertex is meant in the same way as for a cell, so that index $\in [0, 2^{\text{dim}} - 1]$.

    Definition at line 232 of file bounding_box.cc.

    @@ -812,7 +812,7 @@

    Apply the affine transformation that transforms this BoundingBox to a unit BoundingBox object.

If $B$ is this bounding box, and $\hat{B}$ is the unit bounding box, compute the affine mapping that satisfies $G(B) = \hat{B}$ and apply it to point.

    Definition at line 311 of file bounding_box.cc.

    @@ -835,7 +835,7 @@

    Apply the affine transformation that transforms the unit BoundingBox object to this object.

If $B$ is this bounding box, and $\hat{B}$ is the unit bounding box, compute the affine mapping that satisfies $F(\hat{B}) = B$ and apply it to point.

    Definition at line 326 of file bounding_box.cc.

/usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html 2024-12-27 18:24:53.644769632 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html 2024-12-27 18:24:53.648769659 +0000 @@ -793,7 +793,7 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & src

Matrix-vector multiplication: let $dst = M \cdot src$ with $M$ being this matrix.

    Definition at line 511 of file cuda_sparse_matrix.cc.

@@ -816,7 +816,7 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & src

Matrix-vector multiplication: let $dst = M^T \cdot src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 529 of file cuda_sparse_matrix.cc.

@@ -839,7 +839,7 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & src

Adding matrix-vector multiplication. Add $M \cdot src$ on $dst$ with $M$ being this matrix.

    Definition at line 547 of file cuda_sparse_matrix.cc.

@@ -862,7 +862,7 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & src

Adding matrix-vector multiplication. Add $M^T \cdot src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add() but takes the transposed matrix.

    Definition at line 565 of file cuda_sparse_matrix.cc.

    @@ -884,7 +884,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful, e.g., in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    Definition at line 583 of file cuda_sparse_matrix.cc.

@@ -936,8 +936,8 @@ const LinearAlgebra::CUDAWrappers::Vector< Number > & b

Compute the residual of an equation $M \cdot x=b$, where the residual is defined to be $r=b-M \cdot x$. Write the residual into $dst$. The $l_2$ norm of the residual vector is returned.

Source $x$ and destination $dst$ must not be the same vector.

    Definition at line 610 of file cuda_sparse_matrix.cc.

    @@ -959,8 +959,8 @@
Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\ columns\ }j}\sum_{\mathrm{all\ rows\ }i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e., $|Mv|_1\leq |M|_1 |v|_1$.

    Definition at line 625 of file cuda_sparse_matrix.cc.

    @@ -982,8 +982,8 @@
Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j} |M_{ij}|$, (max. sum of rows). This is the natural norm that is compatible to the $l_\infty$-norm of vectors, i.e., $|Mv|_\infty \leq |M|_\infty |v|_\infty$.

    Definition at line 644 of file cuda_sparse_matrix.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html 2024-12-27 18:24:53.692769962 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html 2024-12-27 18:24:53.696769989 +0000 @@ -206,37 +206,37 @@

    Detailed Description

    template<int dim, int spacedim = dim, int chartdim = dim>
    class ChartManifold< dim, spacedim, chartdim >

    This class describes mappings that can be expressed in terms of charts. Specifically, this class with its template arguments describes a chart of dimension chartdim, which is part of a Manifold<dim,spacedim> and is used in an object of type Triangulation<dim,spacedim>: It specializes a Manifold of dimension chartdim embedded in a manifold of dimension spacedim, for which you have explicit pull_back() and push_forward() transformations. Its use is explained in great detail in step-53.

This is a helper class which is useful when you have an explicit map from an Euclidean space of dimension chartdim to an Euclidean space of dimension spacedim which represents your manifold, i.e., when your manifold $\mathcal{M}$ can be represented by a map

\[ F: \mathcal{B} \subset R^{\text{chartdim}} \mapsto \mathcal{M} \subset R^{\text{spacedim}} \]

    (the push_forward() function) and that admits the inverse transformation

\[ F^{-1}: \mathcal{M} \subset R^{\text{spacedim}} \mapsto \mathcal{B} \subset R^{\text{chartdim}} \]

    (the pull_back() function).

    The get_new_point() function of the ChartManifold class is implemented by calling the pull_back() method for all surrounding_points, computing their weighted average in the chartdim Euclidean space, and calling the push_forward() method with the resulting point, i.e.,

\[ \mathbf x^{\text{new}} = F(\sum_i w_i F^{-1}(\mathbf x_i)).  \]

    Derived classes are required to implement the push_forward() and the pull_back() methods. All other functions (with the exception of the push_forward_gradient() function, see below) that are required by mappings will then be provided by this class.
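    As a hypothetical illustration of the two required methods, a minimal polar-coordinate chart in 2d might look as follows; the class name and the choice of chart are ours, not part of the library, and the deal.II and standard headers are assumed to be included:

    class PolarChart : public ChartManifold<2, 2, 2>
    {
    public:
      // chart coordinates are (r, phi)
      virtual Point<2> pull_back(const Point<2> &x) const override
      {
        return Point<2>(x.norm(), std::atan2(x[1], x[0]));
      }
      virtual Point<2> push_forward(const Point<2> &chart) const override
      {
        return Point<2>(chart[0] * std::cos(chart[1]),
                        chart[0] * std::sin(chart[1]));
      }
      virtual std::unique_ptr<Manifold<2, 2>> clone() const override
      {
        return std::make_unique<PolarChart>();
      }
    };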

    Providing function gradients

In order to compute vectors that are tangent to the manifold (for example, tangent to a surface embedded in higher dimensional space, or simply the three unit vectors of ${\mathbb R}^3$), one needs to also have access to the gradient of the push-forward function $F$. The gradient is the matrix $(\nabla F)_{ij}=\partial_j F_i$, where we take the derivative with regard to the chartdim reference coordinates on the flat Euclidean space in which $\mathcal B$ is located. In other words, at a point $\mathbf x$, $\nabla F(\mathbf x)$ is a matrix of size spacedim times chartdim.

Only the ChartManifold::get_tangent_vector() function uses the gradient of the push-forward, but only a subset of all finite element codes actually require the computation of tangent vectors. Consequently, while derived classes need to implement the abstract virtual push_forward() and pull_back() functions of this class, they do not need to implement the virtual push_forward_gradient() function. Rather, that function has a default implementation (and consequently is not abstract, therefore not forcing derived classes to overload it), but the default implementation clearly can not compute anything useful and therefore simply triggers an exception.

    A note on the template arguments

    The dimension arguments chartdim, dim and spacedim must satisfy the following relationships:

    dim <= spacedim
    chartdim <= spacedim

    However, there is no a priori relationship between dim and chartdim. For example, if you want to describe a mapping for an edge (a 1d object) in a 2d triangulation embedded in 3d space, you could do so by parameterizing it via a line

\[
     F: [0,1] \rightarrow {\mathbb R}^3
  \]

    in which case chartdim is 1. On the other hand, there is no reason why one can't describe this as a mapping

\[
     F: {\mathbb R}^3 \rightarrow {\mathbb R}^3
  \]

in such a way that the line $[0,1]\times \{0\}\times \{0\}$ happens to be mapped onto the edge in question. Here, chartdim is 3. This may seem cumbersome but satisfies the requirements of an invertible function $F$ just fine as long as it is possible to get from the edge to the pull-back space and then back again. Finally, given that we are dealing with a 2d triangulation in 3d, one will often have a mapping from, say, the 2d unit square or unit disk to the domain in 3d space, and the edge in question may simply be the mapped edge of the unit domain in 2d space. In this case, chartdim is 2.

    Definition at line 907 of file manifold.h.

    Member Typedef Documentation

    @@ -561,7 +561,7 @@
Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -595,24 +595,24 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
  \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
 \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                            -F^{-1}(\mathbf x_1)\right]
\end{align*}

In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
  \mathbf s(t) &= F(\zeta(t))
 \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
 \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                    -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
  \mathbf s'(0) &=
    \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                       + t\left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right]\right)\right|_{t=0}
\\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                   \left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right].
\end{align*}

This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html 2024-12-27 18:24:53.756770401 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html 2024-12-27 18:24:53.764770456 +0000 @@ -1049,7 +1049,7 @@
x1: The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtained by ChunkSparsityPattern::symmetrize().

    @@ -1380,7 +1380,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.
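    A one-line sketch, assuming a square ChunkSparseMatrix<double> M and a compatible Vector<double> v are already assembled:

    // ns equals (v, M v); with M the mass matrix this is the squared L2 norm
    // of the finite element function whose nodal values are stored in v
    const double ns = M.matrix_norm_square(v);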

    @@ -1454,8 +1454,8 @@
Return the l1-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\ columns\ }j}\sum_{\mathrm{all\ rows\ }i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    @@ -1475,8 +1475,8 @@
Return the linfty-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j} |M_{ij}|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    @@ -2175,7 +2175,7 @@
Return the location of entry $(i,j)$ within the val array.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html 2024-12-27 18:24:53.820770841 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html 2024-12-27 18:24:53.824770869 +0000 @@ -1136,7 +1136,7 @@
Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth an $n\times m$ matrix can have is $\max\{n-1,m-1\}$.

    Definition at line 519 of file chunk_sparsity_pattern.cc.
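    A small worked sketch using the plain SparsityPattern interface, which we assume here behaves the same way for this purpose: a 4x4 tridiagonal pattern has bandwidth 1.

    SparsityPattern sp(4, 4, /*max_entries_per_row=*/3);
    for (unsigned int i = 0; i < 4; ++i)
      for (unsigned int j = (i == 0 ? 0 : i - 1); j <= std::min(i + 1, 3u); ++j)
        sp.add(i, j);
    sp.compress();
    const auto bw = sp.bandwidth();  // |i-j| <= 1 for all entries, so bw == 1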

    /usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html 2024-12-27 18:24:53.868771171 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html 2024-12-27 18:24:53.876771226 +0000 @@ -579,24 +579,24 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
  \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
 \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                            -F^{-1}(\mathbf x_1)\right]
\end{align*}

In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
  \mathbf s(t) &= F(\zeta(t))
 \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
 \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                    -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
  \mathbf s'(0) &=
    \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                       + t\left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right]\right)\right|_{t=0}
\\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                   \left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right].
\end{align*}

This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html 2024-12-27 18:24:53.928771583 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html 2024-12-27 18:24:53.936771638 +0000 @@ -414,7 +414,7 @@
x1: The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
Compute the cylindrical coordinates $(r, \phi, \lambda)$ for the given space point where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    Implements ChartManifold< dim, dim, 3 >.

    @@ -446,7 +446,7 @@
Compute the Cartesian coordinates for a chart point given in cylindrical coordinates $(r, \phi, \lambda)$, where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    Definition at line 1111 of file manifold_lib.cc.
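    A brief usage sketch with an illustrative point; the axis index and coordinates are ours:

    CylindricalManifold<3> cylinder(/*axis=*/2);          // cylinder around the z-axis
    const Point<3> p(1.0, 0.0, 2.5);
    const Point<3> chart = cylinder.pull_back(p);         // (r, phi, lambda) = (1, 0, 2.5)
    const Point<3> back  = cylinder.push_forward(chart);  // recovers p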

    @@ -476,7 +476,7 @@
Compute the derivatives of the mapping from cylindrical coordinates $(r, \phi, \lambda)$ to Cartesian coordinates where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    Definition at line 1131 of file manifold_lib.cc.

    @@ -726,7 +726,7 @@
Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -756,24 +756,24 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
  \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
 \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                            -F^{-1}(\mathbf x_1)\right]
\end{align*}

In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
  \mathbf s(t) &= F(\zeta(t))
 \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
 \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                    -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
  \mathbf s'(0) &=
    \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                       + t\left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right]\right)\right|_{t=0}
\\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                   \left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right].
\end{align*}

This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html 2024-12-27 18:24:53.968771857 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html 2024-12-27 18:24:53.972771885 +0000 @@ -196,7 +196,7 @@

    As a consequence, DataOut is forced to take things apart into their real and imaginary parts, and both are output as separate quantities. This is the case for data that is written directly to a file by DataOut, but it is also the case for data that is first routed through DataPostprocessor objects (or objects of their derived classes): All these objects see is a collection of real values, even if the underlying solution vector was complex-valued.

    All of this has two implications:

    • If a solution vector is complex-valued, then this results in at least two input components at each evaluation point. As a consequence, the DataPostprocessor::evaluate_scalar_field() function is never called, even if the underlying finite element had only a single solution component. Instead, DataOut will always call DataPostprocessor::evaluate_vector_field().
• Implementations of the DataPostprocessor::evaluate_vector_field() in derived classes must understand how the solution values are arranged in the DataPostprocessorInputs::Vector objects they receive as input. The rule here is: If the finite element has $N$ vector components (including the case $N=1$, i.e., a scalar element), then the inputs for complex-valued solution vectors will have $2N$ components. These first contain the values (or gradients, or Hessians) of the real parts of all solution components, and then the values (or gradients, or Hessians) of the imaginary parts of all solution components.

    step-58 provides an example of how this class (or, rather, the derived DataPostprocessorScalar class) is used in a complex-valued situation.
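To make that layout concrete, here is a minimal sketch of such a postprocessor (assuming a scalar, i.e. $N=1$, complex-valued solution, so each evaluation point carries $2N=2$ values, real part first; the class and quantity names are made up for the example):

#include <deal.II/numerics/data_postprocessor.h>
#include <cmath>

using namespace dealii;

template <int dim>
class MagnitudePostprocessor : public DataPostprocessorScalar<dim>
{
public:
  MagnitudePostprocessor()
    : DataPostprocessorScalar<dim>("magnitude", update_values)
  {}

  virtual void evaluate_vector_field(
    const DataPostprocessorInputs::Vector<dim> &inputs,
    std::vector<Vector<double>> &computed_quantities) const override
  {
    for (unsigned int q = 0; q < inputs.solution_values.size(); ++q)
      {
        const double re = inputs.solution_values[q][0]; // real part
        const double im = inputs.solution_values[q][1]; // imaginary part
        computed_quantities[q](0) = std::sqrt(re * re + im * im);
      }
  }
};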

    /usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html 2024-12-27 18:24:54.012772160 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html 2024-12-27 18:24:54.012772160 +0000 @@ -269,7 +269,7 @@

These pictures show an ellipse representing the gradient tensor at, on average, every tenth mesh point. You may want to read through the documentation of the VisIt visualization program (see https://wci.llnl.gov/simulation/computer-codes/visit/) for an interpretation of how exactly tensors are visualized.

In elasticity, one is often interested not in the gradient of the displacement, but in the "strain", i.e., the symmetrized version of the gradient $\varepsilon=\frac 12 (\nabla u + \nabla u^T)$. This is easily facilitated with the following minor modification:

    template <int dim>
    class StrainPostprocessor : public DataPostprocessorTensor<dim>
    {
    public:
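  // The diff truncates the class here. A plausible completion along the
  // lines of the deal.II DataPostprocessorTensor documentation (a sketch,
  // not a verbatim quote of the original page):
  StrainPostprocessor()
    : DataPostprocessorTensor<dim>("strain", update_gradients)
  {}

  virtual void evaluate_vector_field(
    const DataPostprocessorInputs::Vector<dim> &input_data,
    std::vector<Vector<double>> &computed_quantities) const override
  {
    for (unsigned int p = 0; p < input_data.solution_gradients.size(); ++p)
      for (unsigned int d = 0; d < dim; ++d)
        for (unsigned int e = 0; e < dim; ++e)
          // store epsilon_{de} = (du_d/dx_e + du_e/dx_d)/2 in unrolled form
          computed_quantities[p][Tensor<2, dim>::component_to_unrolled_index(
            TableIndices<2>(d, e))] =
            (input_data.solution_gradients[p][d][e] +
             input_data.solution_gradients[p][e][d]) / 2;
  }
};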
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorVector.html 2024-12-27 18:24:54.052772434 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorVector.html 2024-12-27 18:24:54.060772489 +0000 @@ -260,7 +260,7 @@

    In the second image, the background color corresponds to the magnitude of the gradient vector and the vector glyphs to the gradient itself. It may be surprising at first to see that from each vertex, multiple vectors originate, going in different directions. But that is because the solution is only continuous: in general, the gradient is discontinuous across edges, and so the multiple vectors originating from each vertex simply represent the differing gradients of the solution at each adjacent cell.

The output above – namely, the gradient $\nabla u$ of the solution – corresponds to the temperature gradient if one interpreted step-6 as solving a steady-state heat transfer problem. It is very small in the central part of the domain because in step-6 we are solving an equation that has a coefficient $a(\mathbf x)$ that is large in the central part and small on the outside. This can be thought as a material that conducts heat well, and consequently the temperature gradient is small. On the other hand, the "heat flux" corresponds to the quantity $a(\mathbf x) \nabla u(\mathbf x)$. For the solution of that equation, the flux should be continuous across the interface. This is easily verified by the following modification of the postprocessor:

    template <int dim>
    class HeatFluxPostprocessor : public DataPostprocessorVector<dim>
    {
    public:
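  // The diff truncates the class here as well. A plausible completion in the
  // same spirit (a sketch; coefficient() stands in for the a(x) of step-6
  // and is a hypothetical helper):
  HeatFluxPostprocessor()
    : DataPostprocessorVector<dim>("heat_flux",
                                   update_gradients | update_quadrature_points)
  {}

  virtual void evaluate_scalar_field(
    const DataPostprocessorInputs::Scalar<dim> &input_data,
    std::vector<Vector<double>> &computed_quantities) const override
  {
    for (unsigned int p = 0; p < input_data.solution_gradients.size(); ++p)
      for (unsigned int d = 0; d < dim; ++d)
        // heat flux = a(x) times the gradient of u at the evaluation point
        computed_quantities[p](d) =
          coefficient(input_data.evaluation_points[p]) *
          input_data.solution_gradients[p][d];
  }
};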
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html 2024-12-27 18:24:54.084772654 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html 2024-12-27 18:24:54.092772709 +0000 @@ -248,7 +248,7 @@
Return the norm of the derivative object. Here, for the (symmetric) tensor of second derivatives, we choose the absolute value of the largest eigenvalue, which is the matrix norm associated to the $l_2$ norm of vectors. It is also the largest value of the curvature of the solution.

    Definition at line 490 of file derivative_approximation.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 2024-12-27 18:24:54.112772846 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 2024-12-27 18:24:54.120772901 +0000 @@ -243,7 +243,7 @@
Return the norm of the derivative object. Here, for the (symmetric) tensor of second derivatives, we choose the absolute value of the largest eigenvalue, which is the matrix norm associated to the $l_2$ norm of vectors. It is also the largest value of the curvature of the solution.

    Definition at line 629 of file derivative_approximation.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html 2024-12-27 18:24:54.156773148 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html 2024-12-27 18:24:54.156773148 +0000 @@ -497,7 +497,7 @@
Compute the volume element associated with the Jacobian of the transformation $\mathbf F$. That is to say, if $DF$ is square, it computes $\det(DF)$; in case $DF$ is not square, it returns $\sqrt{\det(DF^T \,DF)}$.

    @@ -517,7 +517,7 @@
Assuming that the current object stores the Jacobian of a mapping $\mathbf F$, then the current function computes the covariant form of the derivative, namely $(\nabla \mathbf F) {\mathbf G}^{-1}$, where $\mathbf G = (\nabla \mathbf F)^{T}(\nabla \mathbf F)$. If $\nabla \mathbf F$ is a square matrix (i.e., $\mathbf F: {\mathbb R}^n \mapsto {\mathbb R}^n$), then this function simplifies to computing $\nabla {\mathbf F}^{-T}$.

    @@ -634,7 +634,7 @@
One of the uses of DerivativeForm is to apply it as a linear transformation. This function returns $\nabla \mathbf F(\mathbf x) \Delta \mathbf x$, which approximates the change in $\mathbf F(\mathbf x)$ when $\mathbf x$ is changed by the amount $\Delta \mathbf x$

\[
  \nabla \mathbf F(\mathbf x) \; \Delta \mathbf x
  \approx
  \mathbf F(\mathbf x + \Delta \mathbf x) - \mathbf F(\mathbf x).
\]
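A minimal sketch of this use as a linear transformation (the free function apply_transformation() discussed below; the numbers are arbitrary):

#include <deal.II/base/derivative_form.h>

using namespace dealii;

void example()
{
  DerivativeForm<1, 2, 3> grad_F;     // Jacobian of some F : R^2 -> R^3
  // ... fill grad_F, e.g. from a mapping ...

  Tensor<1, 2> delta_x;
  delta_x[0] = 1e-3;                  // small change in the first coordinate

  // approximates F(x + delta_x) - F(x)
  const Tensor<1, 3> delta_F = apply_transformation(grad_F, delta_x);
}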
@@ -768,7 +768,7 @@

Similar to the previous apply_transformation(). In matrix notation, it computes $DF2 \, DF1^{T}$. Moreover, the result of this operation $\mathbf A$ can be interpreted as a metric tensor in ${\mathbb R}^\text{spacedim}$ which corresponds to the Euclidean metric tensor in ${\mathbb R}^\text{dim}$. For every pair of vectors $\mathbf u, \mathbf v \in {\mathbb R}^\text{spacedim}$, we have:

\[
  \mathbf u \cdot \mathbf A \mathbf v =
  \text{DF2}^{-1}(\mathbf u) \cdot \text{DF1}^{-1}(\mathbf v)
\]
/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html	2024-12-27 18:24:54.208773505 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html	2024-12-27 18:24:54.212773533 +0000
@@ -573,7 +573,7 @@

Evaluation of the residual for a chosen set of degree of freedom values. Underlying this is the computation of the gradient (first derivative) of the scalar function $\Psi$ with respect to all independent variables, i.e.

\[
  \mathbf{r}(\mathbf{X}) =
  \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{X}}
\]
@@ -618,7 +618,7 @@

Compute the linearization of the residual vector around a chosen set of degree of freedom values. Underlying this is the computation of the Hessian (second derivative) of the scalar function $\Psi$ with respect to all independent variables, i.e.

\[
  \frac{\partial\mathbf{r}(\mathbf{X})}{\partial\mathbf{X}}
    =
  \frac{\partial^{2}\Psi(\mathbf{X})}{\partial\mathbf{X} \otimes \partial\mathbf{X}}
\]
/usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html	2024-12-27 18:24:54.244773753 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html	2024-12-27 18:24:54.252773807 +0000
@@ -202,7 +202,7 @@
Since time is marched forward in a discrete manner in our simulations, we need to discuss how we increment time. During time stepping we enter two separate alternating regimes in every step.

• The snapshot stage (the current stage, the consistent stage): In this part of the algorithm, we are at $t = t_n$ and all quantities of the simulation (displacements, strains, temperatures, etc.) are up-to-date for $t = t_n$. In this stage, current time refers to $t_n$, next time refers to $t_{n+1}$, previous time refers to $t_{n-1}$. The other useful notation quantities are the next time step size $t_{n+1} - t_n$ and the previous time step size $t_n - t_{n-1}$. In this stage, it is a perfect occasion to generate text output using print commands within the user's code. Additionally, post-processed outputs can be prepared here, which can then later be viewed by visualization programs such as Tecplot, Paraview, and VisIt. Additionally, during the snapshot stage, the code can assess the quality of the previous step and decide whether it wants to increase or decrease the time step size. The step size for the next time step can be modified here, by calling set_desired_next_step_size().

• The update stage (the transition stage, the inconsistent stage): In this section of the program, the internal state of the simulation is getting updated from $t_n$ to $t_{n+1}$. All of the variables need to be updated one by one, the step number is incremented, the time is incremented by $dt = t_{n+1} - t_n$, and time-integration algorithms are used to update the other simulation quantities. In the middle of this stage, some variables have been updated to $t_{n+1}$ but other variables still represent their value at $t_n$. Thus, we call this the inconsistent stage, requiring that no post-processing output related to the state variables take place within it. The state variables, namely those related to time, the solution field and any internal variables, are not synchronized and then get updated one by one. In general, the order of updating variables is arbitrary, but some care should be taken if there are interdependencies between them. For example, if some variable such as $x$ depends on the calculation of another variable such as $y$, then $y$ must be updated before $x$ can be updated.

    The question arises whether time should be incremented before updating state quantities. Multiple possibilities exist, depending on program and formulation requirements, and possibly the programmer's preferences:

    • Time is incremented before the rest of the updates. In this case, even though time is incremented to $t_{n+1}$, not all variables are updated yet. During this update phase, $dt$ equals the previous time step size. Previous means that it is referring to the $dt$ of the advance_time() command that was performed previously. In the following example code, we are assuming that a and b are two state variables that need to be updated in this time step.
      time.advance_time();
      new_a = update_a(a, b, time.get_previous_step_size());
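  // (The diff truncates the example here. A plausible continuation of the
  // documented pattern; update_a() and update_b() are hypothetical user
  // functions:)
  new_b = update_b(new_a, b, time.get_previous_step_size());
  a = new_a;
  b = new_b;

For context, a self-contained sketch of the snapshot/update cycle (start time, end time, and step size are arbitrary):

#include <deal.II/base/discrete_time.h>

using namespace dealii;

void run()
{
  DiscreteTime time(/*start*/ 0., /*end*/ 1., /*initial step*/ 0.1);
  while (time.is_at_end() == false)
    {
      // snapshot stage: everything is consistent at t_n; produce output here

      // update stage: increment time first, then update the state variables
      time.advance_time();
      // ... update solution and internal variables for the new time ...
    }
}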
      /usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html 2024-12-27 18:24:54.328774329 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html 2024-12-27 18:24:54.332774357 +0000 @@ -422,7 +422,7 @@

      Detailed Description

      template<int dim, int spacedim = dim>
class DoFHandler< dim, spacedim >

Given a triangulation and a description of a finite element, this class enumerates degrees of freedom on all vertices, edges, faces, and cells of the triangulation. As a result, it also provides a basis for a discrete space $V_h$ whose elements are finite element functions defined on each cell by a FiniteElement object. This class satisfies the MeshType concept requirements.

      It is first used in the step-2 tutorial program.

      For each 0d, 1d, 2d, and 3d subobject, this class stores a list of the indices of degrees of freedom defined on this DoFHandler. These indices refer to the unconstrained degrees of freedom, i.e. constrained degrees of freedom are numbered in the same way as unconstrained ones, and are only later eliminated. This leads to the fact that indices in global vectors and matrices also refer to all degrees of freedom and some kind of condensation is needed to restrict the systems of equations to the unconstrained degrees of freedom only. The actual layout of storage of the indices is described in the internal::DoFHandlerImplementation::DoFLevel class documentation.

      The class offers iterators to traverse all cells, in much the same way as the Triangulation class does. Using the begin() and end() functions (and companions, like begin_active()), one can obtain iterators to walk over cells, and query the degree of freedom structures as well as the triangulation data. These iterators are built on top of those of the Triangulation class, but offer the additional information on degrees of freedom functionality compared to pure triangulation iterators. The order in which dof iterators are presented by the ++ and -- operators is the same as that for the corresponding iterators traversing the triangulation on which this DoFHandler is constructed.

      @@ -439,7 +439,7 @@

      Like many other classes in deal.II, the DoFHandler class can stream its contents to an archive using BOOST's serialization facilities. The data so stored can later be retrieved again from the archive to restore the contents of this object. This facility is frequently used to save the state of a program to disk for possible later resurrection, often in the context of checkpoint/restart strategies for long running computations or on computers that aren't very reliable (e.g. on very large clusters where individual nodes occasionally fail and then bring down an entire MPI job).

      The model for doing so is similar for the DoFHandler class as it is for the Triangulation class (see the section in the general documentation of that class). In particular, the load() function does not exactly restore the same state as was stored previously using the save() function. Rather, the function assumes that you load data into a DoFHandler object that is already associated with a triangulation that has a content that matches the one that was used when the data was saved. Likewise, the load() function assumes that the current object is already associated with a finite element object that matches the one that was associated with it when data was saved; the latter can be achieved by calling DoFHandler::distribute_dofs() using the same kind of finite element before re-loading data from the serialization archive.

      hp-adaptive finite element methods

Instead of only using one particular FiniteElement on all cells, this class also allows for an enumeration of degrees of freedom on different finite elements on every cell. To this end, one assigns an active_fe_index to every cell that indicates which element within a collection of finite elements (represented by an object of type hp::FECollection) is the one that lives on this cell. The class then enumerates the degrees of freedom associated with these finite elements on each cell of a triangulation and, if possible, identifies degrees of freedom at the interfaces of cells if they match. If neighboring cells have degrees of freedom along the common interface that do not immediately match (for example, if you have $Q_2$ and $Q_3$ elements meeting at a common face), then one needs to compute constraints to ensure that the resulting finite element space on the mesh remains conforming.

      The whole process of working with objects of this type is explained in step-27. Many of the algorithms this class implements are described in the hp-paper.

      Active FE indices and their behavior under mesh refinement

      The typical workflow for using this class is to create a mesh, assign an active FE index to every active cell, call DoFHandler::distribute_dofs(), and then assemble a linear system and solve a problem on this finite element space.

      @@ -988,7 +988,7 @@
Go through the triangulation and "distribute" the degrees of freedom needed for the given finite element. "Distributing" degrees of freedom involves allocating memory to store the indices on all entities on which degrees of freedom can be located (e.g., vertices, edges, faces, etc.) and to then enumerate all degrees of freedom. In other words, while the mesh and the finite element object by themselves simply define a finite element space $V_h$, the process of distributing degrees of freedom makes sure that there is a basis for this space and that the shape functions of this basis are enumerated in an indexable, predictable way.

      The exact order in which degrees of freedom on a mesh are ordered, i.e., the order in which basis functions of the finite element space are enumerated, is something that deal.II treats as an implementation detail. By and large, degrees of freedom are enumerated in the same order in which we traverse cells, but you should not rely on any specific numbering. In contrast, if you want a particular ordering, use the functions in namespace DoFRenumbering.

      This function is first discussed in the introduction to the step-2 tutorial program.

      Note
      This function makes a copy of the finite element given as argument, and stores it as a member variable, similarly to the above function set_fe().
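A minimal sketch of the workflow described above (a step-2-like setup; mesh size and polynomial degree are arbitrary):

#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <iostream>

using namespace dealii;

int main()
{
  Triangulation<2> triangulation;
  GridGenerator::hyper_cube(triangulation);
  triangulation.refine_global(3);

  const FE_Q<2> fe(1);                 // continuous Q1 elements
  DoFHandler<2> dof_handler(triangulation);
  dof_handler.distribute_dofs(fe);     // enumerate a basis of V_h

  std::cout << "Number of degrees of freedom: "
            << dof_handler.n_dofs() << std::endl;
}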
      /usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html 2024-12-27 18:24:54.380774687 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html 2024-12-27 18:24:54.388774742 +0000 @@ -1119,7 +1119,7 @@
Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix.

      Definition at line 566 of file dynamic_sparsity_pattern.cc.
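A minimal sketch of what bandwidth() reports (the entries are chosen arbitrarily):

#include <deal.II/lac/dynamic_sparsity_pattern.h>

using namespace dealii;

void example()
{
  DynamicSparsityPattern dsp(5, 5);
  dsp.add(0, 0);
  dsp.add(0, 3);   // |i-j| = 3 is the largest offset added
  dsp.add(4, 4);

  const auto b = dsp.bandwidth();   // == 3
}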

      /usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html 2024-12-27 18:24:54.428775017 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html 2024-12-27 18:24:54.432775044 +0000 @@ -204,7 +204,7 @@
      template<typename VectorType = Vector<double>>
      class EigenInverse< VectorType >

Inverse iteration (Wielandt) for eigenvalue computations.

This class implements an adaptive version of the inverse iteration by Wielandt.

There are two choices for the stopping criterion: by default, the norm of the residual $A x - l x$ is computed. Since this might not converge to zero for non-symmetric matrices with non-trivial Jordan blocks, it can be replaced by checking the difference of successive eigenvalues. Use AdditionalData::use_residual for switching this option.

      Usually, the initial guess entering this method is updated after each step, replacing it with the new approximation of the eigenvalue. Using a parameter AdditionalData::relaxation between 0 and 1, this update can be damped. With relaxation parameter 0, no update is performed. This damping allows for slower adaption of the shift value to make sure that the method converges to the eigenvalue closest to the initial guess. This can be aided by the parameter AdditionalData::start_adaption, which indicates the first iteration step in which the shift value should be adapted.

      Definition at line 128 of file eigen.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html 2024-12-27 18:24:54.460775236 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html 2024-12-27 18:24:54.468775291 +0000 @@ -203,7 +203,7 @@

      Detailed Description

      template<typename VectorType = Vector<double>>
      class EigenPower< VectorType >

      Power method (von Mises) for eigenvalue computations.

This method determines the largest eigenvalue of a matrix by applying increasing powers of this matrix to a vector. If there is an eigenvalue $l$ with dominant absolute value, the iteration vectors will become aligned to its eigenspace and $Ax = lx$.

A shift parameter allows one to shift the spectrum, so it is possible to compute the smallest eigenvalue, too.

      Convergence of this method is known to be slow.
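A minimal sketch of driving the power method (matrix and tolerances are arbitrary; the start vector merely has to be nonzero):

#include <deal.II/lac/eigen.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/vector_memory.h>

using namespace dealii;

double largest_eigenvalue(const FullMatrix<double> &A)
{
  SolverControl control(1000, 1e-10);
  GrowingVectorMemory<Vector<double>> memory;
  EigenPower<Vector<double>> power(control, memory);

  double eigenvalue = 0.;
  Vector<double> x(A.n());
  x = 1.;                          // arbitrary nonzero start vector
  power.solve(eigenvalue, A, x);   // x converges to the dominant eigenvector
  return eigenvalue;
}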

      /usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html 2024-12-27 18:24:54.512775593 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html 2024-12-27 18:24:54.512775593 +0000 @@ -233,15 +233,15 @@
      template<int dim, int spacedim = dim>
      class EllipticalManifold< dim, spacedim >

Elliptical manifold description derived from ChartManifold. More information on the elliptical coordinate system can be found at Wikipedia.

      This is based on the definition of elliptic coordinates $(u,v)$

\[
  \left\lbrace\begin{aligned}
  x &= x_0 + c \cosh(u) \cos(v) \\
  y &= y_0 + c \sinh(u) \sin(v)
  \end{aligned}\right.
\]

in which $(x_0,y_0)$ are coordinates of the center of the Cartesian system.

The current implementation uses coordinates $(c,v)$, instead of $(u,v)$, and fixes $u$ according to a given eccentricity. Therefore, this choice of coordinates generates an elliptical manifold characterized by a constant eccentricity: $e=\frac{1}{\cosh(u)}$, with $e\in\left]0,1\right[$.

      The constructor of this class will throw an exception if both dim and spacedim are different from two.

      This manifold can be used to produce hyper_shells with elliptical curvature. As an example, the test elliptical_manifold_01 produces the following triangulation:

@@ -352,7 +352,7 @@
center: Center of the manifold.
major_axis_direction: Direction of the major axis of the manifold.
eccentricity: Eccentricity of the manifold $e\in\left]0,1\right[$.
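A minimal sketch of constructing such a manifold (center, axis direction, and eccentricity are chosen arbitrarily):

#include <deal.II/grid/manifold_lib.h>

using namespace dealii;

void example()
{
  const Point<2> center(0., 0.);
  Tensor<1, 2> major_axis;
  major_axis[0] = 1.;              // major axis along the x direction

  EllipticalManifold<2> manifold(center, major_axis, /*eccentricity=*/0.5);
}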
@@ -489,7 +489,7 @@

Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -614,7 +614,7 @@

    Return the periodicity associated with the submanifold.

For $\text{dim}=2$ and $\text{spacedim}=2$, the first coordinate is non-periodic, while the second coordinate has a periodicity of $2\pi$.

    Definition at line 1221 of file manifold_lib.cc.

    @@ -830,7 +830,7 @@
Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -862,24 +862,24 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
  \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
\\         &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                           -F^{-1}(\mathbf x_1)\right]
\end{align*}

In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
  \mathbf s(t) &= F(\zeta(t))
\\             &= F(\xi_1 +  t (\xi_2-\xi_1))
\\             &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                    -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
  \mathbf s'(0) &=
    \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                       + t\left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right]\right)\right|_{t=0}
\\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                   \left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right].
\end{align*}

This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFECouplingValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFECouplingValues.html 2024-12-27 18:24:54.556775896 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFECouplingValues.html 2024-12-27 18:24:54.560775923 +0000 @@ -178,44 +178,44 @@ class FECouplingValues< dim1, dim2, spacedim >

    FECouplingValues is a class that facilitates the integration of finite element data between two different finite element objects, possibly living on different grids, and with possibly different topological dimensions (i.e., cells, faces, edges, and any combination thereof).

    This class provides a way to simplify the implementation of the following abstract operation:

\[
 \int_{T_1} \int_{T_2} K(x_1, x_2) \phi^1_i(x_1) \phi^2_j(x_2) dT_1 dT_2
\]

    for three different types of Kernels $K$:

• $K(x_1, x_2)$ is a non-singular Kernel function, for example, it is a function of positive powers $\alpha$ of the distance between the quadrature points $|x_1-x_2|^\alpha$;
• $K(x_1, x_2)$ is a singular Kernel function, for example, it is a function of negative powers $\alpha$ of the distance between the quadrature points $|x_1-x_2|^\alpha$;
• $K(x_1, x_2)$ is a Dirac delta distribution $\delta(x_1-x_2)$, such that the integral above is actually a single integral over the intersection of the two sets $T_1$ and $T_2$.

    For the first case, one may think that the only natural way to proceed is to compute the double integral by simply nesting two loops:

\[
 \int_{T_1} \int_{T_2} K(x_1, x_2) \phi^1_i(x_1) \phi^2_j(x_2) dT_1 dT_2
 \approx \sum_{q_1} \sum_{q_2} K(x_1^{q_1}, x_2^{q_2}) \phi^1_i(x_1^{q_1})
 \phi^2_j(x_2^{q_2}) w_1^{q_1} w_2^{q_2},
\]

where $x_1^{q_1}$ and $x_2^{q_2}$ are the quadrature points in $T_1$ and $T_2$ respectively, and $w_1^{q_1}$ and $w_2^{q_2}$ are the corresponding quadrature weights.

This, however, is not the only way to proceed. In fact, such an integral can be rewritten as a single loop over corresponding elements of two arrays of points with the same length that can be thought of as a single quadrature rule on the set $T_1\times T_2$. For singular kernels, for example, this is often the only way to proceed, since the quadrature formula on $T_1\times T_2$ is usually not written as a tensor product quadrature formula, and one needs to build a custom quadrature formula for this purpose.

    This class allows one to treat the three cases above in the same way, and to approximate the integral as follows:

\[
 \int_{T_1} \int_{T_2} K(x_1, x_2) \phi^1_i(x_1) \phi^2_j(x_2) dT_1 dT_2
 \approx \sum_{i=1}^{N_q} K(x_1^{i}, x_2^{i}) \phi^1_i(x_1^{i})
 \phi^2_j(x_2^{i}) w_1^{i} w_2^i,
\]

Since the triple of objects $(\{q\}, \{w\}, \{\phi\})$ is usually provided by a class derived from the FEValuesBase class, this is the type that the class needs at construction time. $T_1$ and $T_2$ can be two arbitrary cells, faces, or edges belonging to possibly different meshes (or to meshes with different topological dimensions), $\phi^1_i$ and $\phi^2_j$ are basis functions defined on $T_1$ and $T_2$, respectively.

The case of the Dirac distribution is when $T_1$ and $T_2$ correspond to the common face of two neighboring cells. In this case, this class provides a functionality which is similar to the FEInterfaceValues class, and gives you a way to access values of basis functions on the neighboring cells, as well as their gradients and Hessians, in a unified fashion, on the face.

    Similarly, this class can be used to couple bulk and surface meshes across the faces of the bulk mesh. In this case, the two FEValuesBase objects will have different topological dimension (i.e., one will be a cell in a co-dimension one triangulation, and the other a face of a bulk grid with co-dimension zero), and the QuadratureCouplingType argument is usually chosen to be QuadratureCouplingType::reorder, since the quadrature points of the two different FEValuesBase objects are not necessarily generated with the same ordering.

The type of integral to compute is controlled by the QuadratureCouplingType argument (see the documentation of that enum class for more details), while the type of degrees-of-freedom coupling is controlled by the DoFCouplingType argument (see the documentation of that enum class for more details).

As an example usage of this class, consider a bilinear form of the form:

\[
 \int_{T_1} \int_{T_2} K_1(x_1, x_2) v_i(x_1) u_j(x_2) dT_1 dT_2 +
 \int_{T_1} \int_{T_2} K_2(x_1, x_2) p_i(x_1) q_j(x_2) dT_1 dT_2
\]

where the finite dimensional space has two scalar components. We indicate with $v_i$ and $p_i$ the trial functions, and with $u_j$ and $q_j$ the corresponding test functions. $K_1$ and $K_2$ are coupling kernels: such a formulation is used, for example, to write the bilinear forms of Galerkin boundary element methods.

    The corresponding implementation would look like the following:

    ... // double loop over cells that yields cell_1 and cell_2
    @@ -338,9 +338,9 @@

    Construct the FECouplingValues with two arbitrary FEValuesBase objects. This class assumes that the FEValuesBase objects that are given at construction time are initialized and ready to use (i.e., that you have called the reinit() function on them before calling this constructor).

    Notice that the actual renumbering of the degrees of freedom and quadrature points is done at construction time, or upon calling the reinit() function. If you change the underlying FEValuesBase objects after construction, you must call the reinit() function to update the renumbering. This may or may not be necessary, depending on the type of coupling that you are using.

    This really depends on the application and on the specific type of coupling. For example, for volume/volume coupling, i.e., for operators with non-local and non-singular kernels of type

\[
 \int_K \int_T f(\phi_i(x)-\phi_j(y), x-y) dx dy
\]

    you may initialize FECouplingValues once, and just reinit the underlying FEValuesBase objects on different cells K and T, without the need to recompute the coupling (i.e., the numbering is always the same, and nothing differs from what happened in the first call).

For cell/surface coupling, the same cell may couple with different faces, so the renumbering must really be computed from scratch for each pair of FEValuesBase objects, so reinitializing the underlying cells and faces will make the renumbering itself invalid, and FECouplingValues must be reinitialized (or constructed from scratch) after calling fe_values_1.reinit(K) and fe_values_2.reinit(T).

    @@ -390,9 +390,9 @@

    Reinitialize the FECouplingValues with two arbitrary FEValuesBase objects. The FEValuesBase objects must be initialized and ready to use, i.e., you must have called the reinit() function on them before calling this method.

    This method computes the actual renumbering of the degrees of freedom and quadrature points. If you change the underlying FEValuesBase objects after calling this method, you may need to call the reinit() function to update the renumbering. This may or may not be necessary, depending on the type of coupling that you are using.

    This really depends on the application and on the specific type of coupling. For example, for volume/volume coupling, i.e., for operators with non-local and non-singular kernels of type

\[
 \int_K \int_T f(\phi_i(x)-\phi_j(y), x-y) dx dy
\]

    you may initialize FECouplingValues once, and just reinit the underlying FEValuesBase objects on different cells K and T, without the need to recompute the coupling (i.e., the numbering is always the same, and nothing differs from what happened in the first call).

For cell/surface coupling, the same cell may couple with different faces, so the renumbering must really be computed from scratch for each pair of FEValuesBase objects, so reinitializing the underlying cells and faces will make the renumbering itself invalid, and FECouplingValues must be reinitialized (or constructed from scratch) after calling fe_values_1.reinit(K) and fe_values_2.reinit(T).

    @@ -489,8 +489,8 @@
Return the two quadrature points in real space at the given quadrature point index, corresponding to a quadrature point in the set $T_1\times T_2$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_quadrature_points flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html 2024-12-27 18:24:54.668776665 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html 2024-12-27 18:24:54.676776720 +0000 @@ -463,7 +463,7 @@

    Likewise, a gradient of the finite element solution represented by vector can be interpolated to the quadrature points by fe_eval.get_gradient(q). The combination of read_dof_values(), evaluate() and get_value() is similar to what FEValues::get_function_values or FEValues::get_function_gradients does, but it is in general much faster because it makes use of the tensor product, see the description of the evaluation routines below, and can do this operation for several cells at once through vectorization.

The second class of tasks done by FEEvaluation are integration tasks for right hand sides. In finite element computations, these typically consist of multiplying a quantity on quadrature points (a function value, or a field interpolated by the finite element space itself) by a set of test functions and integrating over the cell through summation of the values in each quadrature point, multiplied by the quadrature weight and the Jacobian determinant of the transformation. If a generic Function object is given and we want to compute $v_i = \int_\Omega \varphi_i f dx$, this is done by the following cell-wise integration:

FEEvaluation<dim, fe_degree> fe_eval(matrix_free);
Function<dim> &function = ...;
for (unsigned int cell_index = cell_range.first;
     cell_index < cell_range.second; ++cell_index)
  { /* fe_eval.reinit(cell_index); submit_value() of `function` at each
       quadrature point; integrate(EvaluationFlags::values); distribute */ }
    @@ -1789,8 +1789,8 @@
Return the derivative of a finite element function interpolated to the quadrature point with index q_point after a call to evaluate(EvaluationFlags::gradients), in the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf x_q)$.

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.
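For illustration, a hedged sketch; it assumes fe_face_eval is an FEFaceEvaluation object that has been reinitialized on a face and has called evaluate(EvaluationFlags::gradients):

// Both expressions yield grad u(x_q) . n(x_q); the first avoids forming
// the full gradient tensor explicitly.
const auto dudn      = fe_face_eval.get_normal_derivative(q);
const auto dudn_slow = fe_face_eval.get_gradient(q) *
                       fe_face_eval.normal_vector(q);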

    @@ -2114,7 +2114,7 @@
Return the curl of the vector field, $\nabla \times v$, interpolated to the quadrature point with the given index after calling evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim) in 2 and 3 dimensions.
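A short sketch of the vector-valued case; it assumes fe_eval was created with n_components == dim and has called evaluate(EvaluationFlags::gradients):

// FEEvaluation<dim, fe_degree, fe_degree + 1, dim> fe_eval(matrix_free);
const auto curl = fe_eval.get_curl(q); // curl of u_h at quadrature point q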
    @@ -2486,8 +2486,8 @@
Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping from the unit to the real cell, defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm T} \hat{\nabla} u_h$.
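In formulas, the covariant transformation described here reads

\[
  \nabla_{\mathbf x} u_h(\mathbf x_q)
    = J^{-\mathrm T}(\hat{\mathbf x}_q)\,\hat{\nabla} u_h(\hat{\mathbf x}_q),
  \qquad
  J_{ij} = \frac{\partial x_i}{\partial \hat x_j}.
\]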

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html 2024-12-27 18:24:54.772777379 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html 2024-12-27 18:24:54.776777406 +0000 @@ -1131,8 +1131,8 @@
Return the derivative of a finite element function interpolated to the quadrature point with index q_point after a call to evaluate(EvaluationFlags::gradients), in the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf x_q)$.

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    @@ -1402,7 +1402,7 @@
Return the curl of the vector field, $\nabla \times v$, interpolated to the quadrature point with the given index after calling evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim) in 2 and 3 dimensions.
    @@ -1801,8 +1801,8 @@
Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping from the unit to the real cell, defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html 2024-12-27 18:24:54.844777873 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html 2024-12-27 18:24:54.852777928 +0000 @@ -785,8 +785,8 @@
Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping from the unit to the real cell, defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html 2024-12-27 18:24:54.956778642 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html 2024-12-27 18:24:54.960778669 +0000 @@ -1717,8 +1717,8 @@
Return the derivative of a finite element function interpolated to the quadrature point with index q_point after a call to evaluate(EvaluationFlags::gradients), in the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf x_q)$.

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    @@ -2078,7 +2078,7 @@
Return the curl of the vector field, $\nabla \times v$, interpolated to the quadrature point with the given index after calling evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim) in 2 and 3 dimensions.
    @@ -2472,8 +2472,8 @@
Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping from the unit to the real cell, defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html 2024-12-27 18:24:55.136779879 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html 2024-12-27 18:24:55.144779934 +0000 @@ -963,7 +963,7 @@

    If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

Parameters
    i        Number of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_point  Number of the quadrature point at which the function is to be evaluated.
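A typical use of shape_value() is assembling a face term; a minimal sketch, where the names fe_face_values, cell_rhs, dofs_per_cell, and the boundary datum g are illustrative assumptions:

for (unsigned int q = 0; q < fe_face_values.n_quadrature_points; ++q)
  for (unsigned int i = 0; i < dofs_per_cell; ++i)
    cell_rhs(i) += g                              // prescribed boundary datum
                   * fe_face_values.shape_value(i, q)
                   * fe_face_values.JxW(q);       // mapped surface element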
    @@ -1004,7 +1004,7 @@

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

Parameters
    i          Number of the shape function $\varphi_i$ to be evaluated.
    q_point    Number of the quadrature point at which the function is to be evaluated.
    component  Vector component to be evaluated.
    @@ -1043,7 +1043,7 @@

    The same holds for the arguments of this function as for the shape_value() function.

Parameters
    i        Number of the shape function $\varphi_i$ to be evaluated.
    q_point  Number of the quadrature point at which the function is to be evaluated.
    @@ -1251,11 +1251,11 @@
Parameters
    [in]  fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] values       The values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.

Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 716 of file fe_values_base.cc.
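A minimal usage sketch; it assumes fe_face_values was constructed with update_values, has been reinitialized on a cell and face, and that solution is the global solution vector of a scalar field:

std::vector<double> values(fe_face_values.n_quadrature_points);
fe_face_values.get_function_values(solution, values);
// values[q] now holds u_h at the q-th face quadrature point.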

    @@ -1289,7 +1289,7 @@

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 763 of file fe_values_base.cc.

    @@ -1466,11 +1466,11 @@
Parameters
    [in]  fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] gradients    The gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1506,7 +1506,7 @@

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 898 of file fe_values_base.cc.
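Correspondingly, a hedged sketch for gradients; same illustrative assumptions as above, but with the update_gradients flag set:

std::vector<Tensor<1, dim>> gradients(fe_face_values.n_quadrature_points);
fe_face_values.get_function_gradients(solution, gradients);
// gradients[q][d] is the derivative in coordinate direction d at point q.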

    @@ -1625,11 +1625,11 @@
Parameters
    [in]  fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] hessians     The Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1670,7 +1670,7 @@

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1001 of file fe_values_base.cc.
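The same pattern for second derivatives; a sketch under the same illustrative assumptions (scalar field, dim == spacedim, update_hessians set):

std::vector<Tensor<2, dim>> hessians(fe_face_values.n_quadrature_points);
fe_face_values.get_function_hessians(solution, hessians);
// hessians[q][i][j] is the (i,j) second derivative at quadrature point q.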

    @@ -1789,11 +1789,11 @@
Parameters
    [in]  fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] laplacians   The Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.

Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    @@ -1831,7 +1831,7 @@

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
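A hedged sketch for Laplacians; note that, as stated above, this needs the update_hessians flag, since the Laplacian is computed as the trace of the Hessian (names as in the earlier sketches):

std::vector<double> laplacians(fe_face_values.n_quadrature_points);
fe_face_values.get_function_laplacians(solution, laplacians);
// For each q, laplacians[q] equals trace(hessians[q]).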
    @@ -1994,11 +1994,11 @@
Parameters
    [in]  fe_function        A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] third_derivatives  The third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2039,7 +2039,7 @@

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1234 of file fe_values_base.cc.
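And the corresponding sketch for third derivatives, under the same illustrative assumptions but with update_3rd_derivatives set:

std::vector<Tensor<3, dim>> third_derivatives(fe_face_values.n_quadrature_points);
fe_face_values.get_function_third_derivatives(solution, third_derivatives);
// third_derivatives[q][i][j][k] is the (i,j,k) component at point q.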

    @@ -2376,7 +2376,7 @@

    Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

    For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
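JxW() is the quantity one multiplies by in every quadrature loop; a minimal sketch, where f and fe_values are illustrative and both update_quadrature_points and update_JxW_values are assumed to be set:

double integral = 0.;
for (unsigned int q = 0; q < fe_values.n_quadrature_points; ++q)
  integral += f(fe_values.quadrature_point(q)) * fe_values.JxW(q);
// `integral` now approximates the integral of f over the current cell/face.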
    @@ -2433,7 +2433,7 @@
Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
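A one-line sketch, assuming update_jacobians was set when constructing fe_values:

const DerivativeForm<1, dim, spacedim> &J = fe_values.jacobian(q);
// J[i][j] = dx_i / dxhat_j at the q-th quadrature point.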
    @@ -2491,7 +2491,7 @@
Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2549,7 +2549,7 @@
Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2607,7 +2607,7 @@
Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2665,8 +2665,8 @@
Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijkl}=\frac{d^2J_{iJ}}{d\hat x_K d\hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html 2024-12-27 18:24:55.244780620 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html 2024-12-27 18:24:55.248780648 +0000 @@ -667,7 +667,7 @@

    If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

Parameters
    i        Number of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_point  Number of the quadrature point at which the function is to be evaluated.
    @@ -706,7 +706,7 @@

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

Parameters
    i          Number of the shape function $\varphi_i$ to be evaluated.
    q_point    Number of the quadrature point at which the function is to be evaluated.
    component  Vector component to be evaluated.
    @@ -743,7 +743,7 @@

    The same holds for the arguments of this function as for the shape_value() function.

Parameters
    i        Number of the shape function $\varphi_i$ to be evaluated.
    q_point  Number of the quadrature point at which the function is to be evaluated.
    @@ -937,11 +937,11 @@
Parameters
    [in]  fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] values       The values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.

Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 487 of file fe_values_base.cc.

    @@ -971,7 +971,7 @@

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 505 of file fe_values_base.cc.

    @@ -1132,11 +1132,11 @@
Parameters
    [in]  fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] gradients    The gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1168,7 +1168,7 @@

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 683 of file fe_values_base.cc.

    @@ -1275,11 +1275,11 @@
Parameters
    [in]  fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] hessians     The Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1316,7 +1316,7 @@

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 786 of file fe_values_base.cc.

    @@ -1423,11 +1423,11 @@
Parameters
    [in]  fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] laplacians   The Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.

Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    @@ -1461,7 +1461,7 @@

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1608,11 +1608,11 @@
Parameters
    [in]  fe_function        A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] third_derivatives  The third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
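    A minimal sketch of the scalar variant follows; it requests update_3rd_derivatives as the Note requires, and all names are illustrative assumptions.

    #include <deal.II/base/quadrature_lib.h>
    #include <deal.II/base/tensor.h>
    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/fe/fe_values.h>
    #include <deal.II/lac/vector.h>

    using namespace dealii;

    template <int dim>
    void sample_third_derivatives(const DoFHandler<dim>    &dof_handler,
                                  const FiniteElement<dim> &fe,
                                  const Vector<double>     &solution)
    {
      QGauss<dim>   quadrature(fe.degree + 1);
      FEValues<dim> fe_values(fe, quadrature, update_3rd_derivatives);

      // One rank-3 tensor per quadrature point, indexed as
      // third_derivatives[q][i][j][k] per the postcondition above.
      std::vector<Tensor<3, dim>> third_derivatives(quadrature.size());

      for (const auto &cell : dof_handler.active_cell_iterators())
        {
          fe_values.reinit(cell);
          fe_values.get_function_third_derivatives(solution, third_derivatives);
        }
    }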
    @@ -1649,7 +1649,7 @@

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1006 of file fe_values_base.cc.

    @@ -1964,7 +1964,7 @@

    Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

    For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

    You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
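    Because JxW(q) is exactly the measure element $dx$ or $ds$, summing it over quadrature points integrates the constant 1 and thus approximates the cell volume (or face area). A minimal sketch, assuming fe_values was constructed with update_JxW_values and cell comes from the surrounding loop:

    double measure = 0;
    fe_values.reinit(cell);
    for (unsigned int q = 0; q < fe_values.n_quadrature_points; ++q)
      measure += 1.0 * fe_values.JxW(q); // integrand f(x_q) == 1 here
    // 'measure' now approximates the integral of 1 over the cell, i.e. its volume.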
    @@ -2017,7 +2017,7 @@
    Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
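    For instance, the volume scaling can be recovered from the Jacobian directly; in the sketch below (update_jacobians requested when constructing fe_values, names illustrative), det(J) times the reference quadrature weight is what also enters JxW(q) for volume evaluations.

    fe_values.reinit(cell);
    for (unsigned int q = 0; q < fe_values.n_quadrature_points; ++q)
      {
        // J_{ij} = dx_i / d\hat x_j at quadrature point q
        const DerivativeForm<1, dim, dim> J = fe_values.jacobian(q);
        const double det = J.determinant(); // dx = det(J) * d\hat x
      }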
    @@ -2071,7 +2071,7 @@
    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2125,7 +2125,7 @@
    Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2179,7 +2179,7 @@
    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2233,8 +2233,8 @@
    Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijkl}=\frac{d^2J_{iJ}}{d\hat x_K d\hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1}(J_{lL})^{-1}$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html 2024-12-27 18:24:55.320781142 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html 2024-12-27 18:24:55.324781170 +0000 @@ -496,9 +496,9 @@
  • If the q_index and mapping_index arguments to this function are explicitly specified (rather than leaving them at their default values), then these indices will be used to select which element of the hp::QCollection and hp::MappingCollection passed to the constructor should serve as the quadrature and mapping to be used.
  • If one of these arguments is left at its default value, then the function will need to choose a quadrature and/or mapping that is appropriate for the two finite element spaces used on the two cells adjacent to the current interface. As the first choice, if the quadrature or mapping collection we are considering has only one element, then that is clearly the one that should be used.
  • If the quadrature or mapping collection have multiple elements, then we need to dig further. For quadrature objects, we can compare whether the two quadrature objects that correspond to the active_fe_index values of the two adjacent cells are identical (i.e., have quadrature points at the same locations, and have the same weights). If this is so, then it does not matter which one of the two we take, and we choose one or the other.
  • If this has still not helped, we try to find out which of the two finite element spaces on the two adjacent cells is "larger" (say, if you had used $Q_2$ and $Q_4$ elements on the two adjacent cells, then the $Q_4$ element is the larger one); the determination of which space is "larger" is made using the hp::FECollection::find_dominated_fe() function, which is not necessarily intended for this kind of query, but yields a result that serves just fine for our purposes here. We then operate on the assumption that the quadrature object associated with the "larger" of the two spaces is the appropriate one to use for the face that separates these two spaces. (The two possible outcomes are illustrated in the sketch after this list.)
    • If this function returns that one of the two elements in question is dominated by the other, then presumably it is the "larger" one, and we take the quadrature formula and mapping that correspond to this "larger" element. For example, for the $Q_2$ element mentioned above, one would generally use a QGauss(3) quadrature formula, whereas for the $Q_4$ element, one would use QGauss(5). To integrate jump and average terms on the interface between cells using these two elements, QGauss(5) is appropriate. Because, typically, people will order elements in the hp::FECollection in the same order as the quadrature and mapping objects in hp::QCollection and hp::MappingCollection, this function will use the index of the "larger" element in the hp::FECollection to also index into the hp::QCollection and hp::MappingCollection to retrieve quadrature and mapping objects appropriate for the current face.
    • There are cases where neither element dominates the other. For example, if one uses $Q_2\times Q_1$ and $Q_1\times Q_2$ elements on neighboring cells, neither of the two spaces dominates the other – or, in the context of the current function, neither space is "larger" than the other. In that case, there is no way for the current function to determine which quadrature and mapping objects associated with the two elements are the appropriate ones. If that happens, you will get an error – and the only way to avoid the error is to explicitly specify for these interfaces which quadrature and mapping objects you want to use, by providing non-default values for the q_index and mapping_index arguments to this function.
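    The last bullet above is the case that requires action from the caller. Below is a minimal sketch of working around it, assuming the hp-enabled FEInterfaceValues constructor and the reinit() overload that accepts a q_index argument; the element choices, cell/face variables, and indices are all hypothetical.

    // Two vector-valued elements, neither of which dominates the other:
    hp::FECollection<dim> fe_collection(
      FESystem<dim>(FE_Q<dim>(2), 1, FE_Q<dim>(1), 1),
      FESystem<dim>(FE_Q<dim>(1), 1, FE_Q<dim>(2), 1));
    hp::QCollection<dim - 1> face_quadratures(QGauss<dim - 1>(3),
                                              QGauss<dim - 1>(3));

    FEInterfaceValues<dim> fiv(fe_collection, face_quadratures,
                               update_values | update_JxW_values);

    // Neither space is "larger", so select the quadrature explicitly
    // (entry 0 of the collection) rather than relying on the default:
    fiv.reinit(cell, face_no, numbers::invalid_unsigned_int,
               neighbor, neighbor_face_no, numbers::invalid_unsigned_int,
               /*q_index=*/0);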
  @@ -834,7 +834,7 @@

      Mapped quadrature weight. This value equals the mapped surface element times the weight of the quadrature point.

      You can think of the quantity returned by this function as the surface element $ds$ in the integral that we implement here by quadrature.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1104,9 +1104,9 @@ const unsigned int component = 0

      Return the jump $\jump{u}=u_{\text{cell0}} - u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      Note that one can define the jump in different ways (the value "there" minus the value "here", or the other way around; both are used in the finite element literature). The definition here uses "value here minus value there", as seen from the first cell.

      If this is a boundary face (at_boundary() returns true), then $\jump{u}=u_{\text{cell0}}$, that is "the value here (minus zero)".

      Note
      The name of the function is supposed to be read as "the jump (singular) in the values (plural: one or two possible values) of the shape function (singular)".
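      Taken together with JxW(), these jump functions are what one loops over when assembling interface terms of discontinuous Galerkin methods. A minimal sketch of a penalty term, modeled on the usual interior-penalty pattern; penalty, cell, neighbor, and the face numbers are assumed to come from the surrounding loop.

      fiv.reinit(cell, face_no, numbers::invalid_unsigned_int,
                 neighbor, neighbor_face_no, numbers::invalid_unsigned_int);

      const unsigned int n_dofs = fiv.n_current_interface_dofs();
      FullMatrix<double> face_matrix(n_dofs, n_dofs);

      // sum_q penalty * [phi_i] * [phi_j] * JxW over this interface:
      for (const unsigned int q : fiv.quadrature_point_indices())
        for (unsigned int i = 0; i < n_dofs; ++i)
          for (unsigned int j = 0; j < n_dofs; ++j)
            face_matrix(i, j) += penalty *
                                 fiv.jump_in_shape_values(i, q) *
                                 fiv.jump_in_shape_values(j, q) *
                                 fiv.JxW(q);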
      @@ -1135,9 +1135,9 @@ const unsigned int component = 0

      Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}} - \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      If this is a boundary face (at_boundary() returns true), then $\jump{\nabla u}=\nabla u_{\text{cell0}}$.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the gradients (plural: one or two possible gradients) of the shape function (singular)".
      @@ -1166,9 +1166,9 @@ const unsigned int component = 0

      Return the jump in the Hessian $\jump{\nabla^2 u} = \nabla^2 u_{\text{cell0}} - \nabla^2 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      If this is a boundary face (at_boundary() returns true), then $\jump{\nabla^2 u} = \nabla^2 u_{\text{cell0}}$.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the Hessians (plural: one or two possible values for the derivative) of the shape function (singular)".
      @@ -1197,9 +1197,9 @@ const unsigned int component = 0

      Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3 u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      If this is a boundary face (at_boundary() returns true), then $\jump{\nabla^3 u} = \nabla^3 u_{\text{cell0}}$.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the third derivatives (plural: one or two possible values for the derivative) of the shape function (singular)".
      @@ -1228,9 +1228,9 @@ const unsigned int component = 0

      Return the average $\average{u}=\frac{1}{2}u_{\text{cell0}} + \frac{1}{2}u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      If this is a boundary face (at_boundary() returns true), then $\average{u}=u_{\text{cell0}}$.

      Note
      The name of the function is supposed to be read as "the average (singular) of the values (plural: one or two possible values) of the shape function (singular)".
      @@ -1259,9 +1259,9 @@ const unsigned int component = 0

      Return the average of the gradient $\average{\nabla u} = \frac{1}{2}\nabla u_{\text{cell0}} + \frac{1}{2} \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      If this is a boundary face (at_boundary() returns true), then $\average{\nabla u}=\nabla u_{\text{cell0}}$.

      Note
      The name of the function is supposed to be read as "the average (singular) of the gradients (plural: one or two possible values for the gradient) of the shape function (singular)".
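      Averages typically appear together with jumps in the consistency terms of such interface assembly loops. Continuing the earlier penalty sketch: the names average_of_shape_gradients() and get_normal_vectors() are assumed to be available on this FEInterfaceValues object, as in recent deal.II releases.

      const std::vector<Tensor<1, dim>> &normals = fiv.get_normal_vectors();

      // SIPG-style consistency terms: -([phi_i] {grad phi_j}.n + {grad phi_i}.n [phi_j]) JxW
      for (const unsigned int q : fiv.quadrature_point_indices())
        for (unsigned int i = 0; i < n_dofs; ++i)
          for (unsigned int j = 0; j < n_dofs; ++j)
            face_matrix(i, j) -=
              (fiv.jump_in_shape_values(i, q) *
                 (fiv.average_of_shape_gradients(j, q) * normals[q]) +
               (fiv.average_of_shape_gradients(i, q) * normals[q]) *
                 fiv.jump_in_shape_values(j, q)) *
              fiv.JxW(q);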
      @@ -1290,10 +1290,10 @@ const unsigned int component = 0

      Return the average of the Hessian $\average{\nabla^2 u} = \frac{1}{2}\nabla^2 u_{\text{cell0}} + \frac{1}{2} \nabla^2 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

      If this is a boundary face (at_boundary() returns true), then $\average{\nabla^2 u}=\nabla^2 u_{\text{cell0}}$.

      Note
      The name of the function is supposed to be read as "the average (singular) of the Hessians (plural: one or two possible values for the second derivatives) of the shape function (singular)".
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html 2024-12-27 18:24:55.380781554 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html 2024-12-27 18:24:55.372781499 +0000 @@ -475,7 +475,7 @@ const unsigned int q_point

      Return the jump $\jump{u}=u_1 - u_2$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the values (plural: one or two possible values) of the shape function (singular)".
      @@ -499,7 +499,7 @@ const unsigned int q_point

      Return the jump of the gradient $\jump{\nabla u}$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the gradients (plural: one or two possible gradients) of the shape function (singular)".
      @@ -523,8 +523,8 @@ const unsigned int q_point

      Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}} - \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the Hessians (plural: one or two possible values for the second derivative) of the shape function (singular)".
      @@ -548,8 +548,8 @@ const unsigned int q_point

      Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3 u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the third derivatives (plural: one or two possible values for the third derivative) of the shape function (singular)".
      @@ -573,7 +573,7 @@ const unsigned int q_point

      Return the average value $\average{u}=\frac{1}{2}(u_1 + u_2)$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

      Note
      The name of the function is supposed to be read as "the average (singular) of the values (plural: one or two possible values) of the shape function (singular)".
      @@ -597,7 +597,7 @@ const unsigned int q_point

      Return the average of the gradient $\average{\nabla u}$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

      Note
      The name of the function is supposed to be read as "the average (singular) of the gradients (plural: one or two possible values of the derivative) of the shape function (singular)".
      @@ -621,9 +621,9 @@ const unsigned int q_point

      Return the average of the Hessian $\average{\nabla^2 u} = \frac{1}{2}\nabla^2 u_{\text{cell0}} + \frac{1}{2} \nabla^2 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

      Note
      The name of the function is supposed to be read as "the average (singular) in the Hessians (plural: one or two possible values of the second derivative) of the shape function (singular)".
      @@ -659,7 +659,7 @@

      Return the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

      The argument here_or_there selects between the value on cell 0 (here, true) and cell 1 (there, false). You can also interpret it as "upstream" (true) and "downstream" (false) as defined by the direction of the normal vector in this quadrature point. If here_or_there is true, the values from the first cell of the interface are used.

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
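      A minimal sketch of querying both sides of the interface through a scalar view; the extractor component, the solution vector, and n_q_points are illustrative assumptions.

      const FEValuesExtractors::Scalar scalar(0); // hypothetical component

      std::vector<double> here_values(n_q_points);
      std::vector<double> there_values(n_q_points);

      // true:  values from cell 0 ("here" / "upstream");
      // false: values from cell 1 ("there" / "downstream").
      fiv[scalar].get_function_values(true, solution, here_values);
      fiv[scalar].get_function_values(false, solution, there_values);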
      @@ -732,7 +732,7 @@

      Return the jump in the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -786,7 +786,7 @@

      Return the jump in the gradients of the selected scalar components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -840,7 +840,7 @@

      Return the jump in the Hessians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

      The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -894,7 +894,7 @@

      Return the jump in the third derivatives of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

      The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -948,7 +948,7 @@

      Return the average of the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1002,7 +1002,7 @@

      Return the average of the gradients of the selected scalar components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1056,7 +1056,7 @@

      Return the average of the Hessians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

      The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html 2024-12-27 18:24:55.432781911 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html 2024-12-27 18:24:55.440781966 +0000 @@ -480,7 +480,7 @@ const unsigned int q_point

      Return the jump vector $[\mathbf{u}]=\mathbf{u_1} - \mathbf{u_2}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the values (plural: one or two possible values) of the shape function (singular)".
      @@ -504,8 +504,8 @@ const unsigned int q_point

      Return the jump of the gradient (a tensor of rank 2) $\jump{\nabla \mathbf{u}}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the gradients (plural: one or two possible gradients) of the shape function (singular)".
      @@ -551,8 +551,8 @@ const unsigned int q_point

      Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}} - \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the Hessians (plural: one or two possible values for the second derivative) of the shape function (singular)".
      @@ -598,8 +598,8 @@ const unsigned int q_point

      Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3 u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

      Note
      The name of the function is supposed to be read as "the jump (singular) in the third derivatives (plural: one or two possible values for the third derivative) of the shape function (singular)".
      @@ -623,8 +623,8 @@ const unsigned int q_point

      Return the average vector $\average{\mathbf{u}}=\frac{1}{2}(\mathbf{u_1} + \mathbf{u_2})$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

      Note
      The name of the function is supposed to be read as "the average (singular) of the values (plural: one or two possible values) of the shape function (singular)".
      @@ -648,8 +648,8 @@ const unsigned int q_point

      Return the average of the gradient (a tensor of rank 2) $\average{\nabla \mathbf{u}}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

      Note
      The name of the function is supposed to be read as "the average (singular) of the gradients (plural: one or two possible values of the derivative) of the shape function (singular)".
      @@ -673,9 +673,9 @@ const unsigned int q_point

      Return the average of the Hessian $\average{\nabla^2 u} = \frac{1}{2}\nabla^2 u_{\text{cell0}} + \frac{1}{2} \nabla^2 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

      Note
      The name of the function is supposed to be read as "the average (singular) in the Hessians (plural: one or two possible values of the second derivative) of the shape function (singular)".
      @@ -733,7 +733,7 @@

      Return the values of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

      The argument here_or_there selects between the value on cell 0 (here, true) and cell 1 (there, false). You can also interpret it as "upstream" (true) and "downstream" (false) as defined by the direction of the normal vector in this quadrature point. If here_or_there is true, the values from the first cell of the interface are used.

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -806,7 +806,7 @@

      Return the jump in the values of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -860,7 +860,7 @@

      Return the jump in the gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -914,7 +914,7 @@

      Return the jump in the Hessians of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

      The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -968,7 +968,7 @@

      Return the jump in the third derivatives of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

      The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1022,7 +1022,7 @@

      Return the average of the values of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1076,7 +1076,7 @@

      Return the average of the gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1130,7 +1130,7 @@

      Return the average of the Hessians of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

      The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html 2024-12-27 18:24:55.616783175 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html 2024-12-27 18:24:55.624783230 +0000 @@ -212,25 +212,25 @@

      Detailed Description

      template<int dim, int spacedim = dim>
      -class FESeries::Fourier< dim, spacedim >

      A class to calculate expansion of a scalar FE (or a single component of vector-valued FE) field into Fourier series on a reference element. The exponential form of the Fourier series is based on completeness and Hermitian orthogonality of the set of exponential functions $ \phi_{\bf k}({\bf x}) = \exp(2 \pi i\, {\bf k} \cdot {\bf x})$. For example in 1d the L2-orthogonality condition reads

      -\[
+class FESeries::Fourier< dim, spacedim ></div><p>A class to calculate expansion of a scalar FE (or a single component of vector-valued FE) field into <a class=Fourier series on a reference element. The exponential form of the Fourier series is based on completeness and Hermitian orthogonality of the set of exponential functions $ \phi_{\bf k}({\bf x}) = \exp(2 \pi i\, {\bf k} \cdot {\bf x})$. For example in 1d the L2-orthogonality condition reads

      +\[
   \int_0^1 \phi_k(x) \phi_l^\ast(x) dx=\delta_{kl}.
-\] +\]" src="form_1252.png"/>

      -

      Note that $ \phi_{\bf k} = \phi_{-\bf k}^\ast $.

      +

      Note that $ \phi_{\bf k} = \phi_{-\bf k}^\ast $.

      The arbitrary scalar FE field on the reference element can be expanded in the complete orthogonal exponential basis as

      -\[
+<picture><source srcset=\[
    u({\bf x})
    = \sum_{\bf k} c_{\bf k} \phi_{\bf k}({\bf x}).
-\] +\]" src="form_1254.png"/>

      From the orthogonality property of the basis, it follows that

\[
   c_{\bf k} =
   \int_{[0,1]^d} u({\bf x}) \phi_{\bf k}^\ast ({\bf x}) d{\bf x}\,.
\]

It is these complex-valued expansion coefficients that are calculated by this class. Note that $ u({\bf x}) = \sum_i u_i N_i({\bf x})$, where $ N_i({\bf x}) $ are real-valued FiniteElement shape functions. Consequently $ c_{\bf k} \equiv c_{-\bf k}^\ast $ and we only need to compute $ c_{\bf k} $ for positive indices $ \bf k $.
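To see why, note that $u$ is real-valued and $ \phi_{-\bf k}^\ast = \phi_{\bf k} $, so

\[
   c_{-\bf k}
   = \int_{[0,1]^d} u({\bf x}) \phi_{-\bf k}^\ast ({\bf x}) d{\bf x}
   = \int_{[0,1]^d} u({\bf x}) \phi_{\bf k} ({\bf x}) d{\bf x}
   = \left( \int_{[0,1]^d} u({\bf x}) \phi_{\bf k}^\ast ({\bf x}) d{\bf x} \right)^\ast
   = c_{\bf k}^\ast .
\]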

      Definition at line 89 of file fe_series.h.

      Member Typedef Documentation

      @@ -835,7 +835,7 @@
Angular frequencies $ 2 \pi {\bf k} $.

      Definition at line 195 of file fe_series.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html 2024-12-27 18:24:55.660783477 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html 2024-12-27 18:24:55.664783505 +0000 @@ -209,39 +209,39 @@
      template<int dim, int spacedim = dim>
      class FESeries::Legendre< dim, spacedim >

A class to calculate expansion of a scalar FE (or a single component of vector-valued FE) field into a series of Legendre functions on a reference element.

      Legendre functions are solutions to Legendre's differential equation

\[
   \frac{d}{dx}\left([1-x^2] \frac{d}{dx} P_n(x)\right) +
   n[n+1] P_n(x) = 0
\]

      and can be expressed using Rodrigues' formula

\[
   P_n(x) = \frac{1}{2^n n!} \frac{d^n}{dx^n}[x^2-1]^n.
\]

These polynomials are orthogonal with respect to the $ L^2 $ inner product on the interval $ [-1;1] $

\[
   \int_{-1}^1 P_m(x) P_n(x) dx = \frac{2}{2n + 1} \delta_{mn}
\]

and are complete. A family of $ L^2 $-orthogonal polynomials on $ [0;1] $ can be constructed via

\[
   \widetilde P_m = \sqrt{2} P_m(2x-1).
\]

An arbitrary scalar FE field on the reference element $ [0;1] $ can be expanded in the complete orthogonal basis as

\[
   u(x)
   = \sum_{m} c_m \widetilde P_{m}(x).
\]

      From the orthogonality property of the basis, it follows that

\[
   c_m = \frac{2m+1}{2}
   \int_0^1 u(x) \widetilde P_m(x) dx .
\]
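As a brief worked example: for $u(x) = x$ on $ [0;1] $ these formulas give

\[
   c_0 = \frac{1}{2} \int_0^1 x \, \sqrt{2} \, dx = \frac{\sqrt{2}}{4} ,
   \qquad
   c_1 = \frac{3}{2} \int_0^1 x \, \sqrt{2}\,(2x-1) \, dx = \frac{\sqrt{2}}{4} ,
\]

and all higher coefficients vanish, so that indeed $ c_0 \widetilde P_0(x) + c_1 \widetilde P_1(x) = \tfrac{1}{2} + \tfrac{1}{2}(2x-1) = x $.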

This class calculates coefficients $ c_{\bf k} $ using $ dim $-dimensional Legendre polynomials constructed from $ \widetilde P_m(x) $ using the tensor product rule.

      Definition at line 259 of file fe_series.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html 2024-12-27 18:24:55.768784219 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html 2024-12-27 18:24:55.772784247 +0000 @@ -991,7 +991,7 @@

      If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

      Parameters
i        Number of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
q_point  Number of the quadrature point at which the function is to be evaluated
      @@ -1032,7 +1032,7 @@

      Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

      Parameters
i          Number of the shape function $\varphi_i$ to be evaluated.
q_point    Number of the quadrature point at which the function is to be evaluated.
component  Vector component to be evaluated.
      @@ -1071,7 +1071,7 @@

      The same holds for the arguments of this function as for the shape_value() function.

      Parameters
i        Number of the shape function $\varphi_i$ to be evaluated.
q_point  Number of the quadrature point at which the function is to be evaluated.
      @@ -1279,11 +1279,11 @@
      Parameters
[in]   fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out]  values       The values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
Postcondition
values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 716 of file fe_values_base.cc.
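A minimal usage sketch (dof_handler, fe, and solution stand for objects assumed to be set up elsewhere; the same call is available on FEFaceValues and FESubfaceValues objects after their respective reinit calls):

QGauss<dim>   quadrature(fe.degree + 1);
FEValues<dim> fe_values(fe, quadrature,
                        update_values | update_gradients |
                        update_hessians | update_JxW_values);
std::vector<double> values(quadrature.size());
for (const auto &cell : dof_handler.active_cell_iterators())
  {
    fe_values.reinit(cell);
    fe_values.get_function_values(solution, values);
    // values[q] now holds the value of the field at quadrature point q.
  }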

      @@ -1317,7 +1317,7 @@

      This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 763 of file fe_values_base.cc.

      @@ -1494,11 +1494,11 @@
      Parameters
[in]   fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out]  gradients    The gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
Postcondition
gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1534,7 +1534,7 @@

      This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 898 of file fe_values_base.cc.
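Continuing the sketch above (update_gradients is among the flags passed to the constructor):

std::vector<Tensor<1, dim>> gradients(quadrature.size());
fe_values.get_function_gradients(solution, gradients);
// gradients[q][d] is the derivative in coordinate direction d at point q.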

      @@ -1653,11 +1653,11 @@
      Parameters
[in]   fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out]  hessians     The Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
Postcondition
hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1698,7 +1698,7 @@

      This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1001 of file fe_values_base.cc.
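The corresponding sketch for Hessians (update_hessians is among the flags):

std::vector<Tensor<2, dim>> hessians(quadrature.size());
fe_values.get_function_hessians(solution, hessians);
// hessians[q][i][j] is the (i,j)th second derivative at point q.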

      @@ -1817,11 +1817,11 @@
      Parameters
[in]   fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out]  laplacians   The Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
Postcondition
laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
      For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      @@ -1859,7 +1859,7 @@

      This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
      For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
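The corresponding sketch for Laplacians (as noted above, this also relies on update_hessians rather than a separate flag):

std::vector<double> laplacians(quadrature.size());
fe_values.get_function_laplacians(solution, laplacians);
// For each q, laplacians[q] equals trace(hessians[q]).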
      @@ -2022,11 +2022,11 @@
      Parameters
[in]   fe_function        A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out]  third_derivatives  The third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
Postcondition
third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2067,7 +2067,7 @@

      This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1234 of file fe_values_base.cc.
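The corresponding sketch for third derivatives (update_3rd_derivatives must be among the flags):

std::vector<Tensor<3, dim>> third_derivatives(quadrature.size());
fe_values.get_function_third_derivatives(solution, third_derivatives);
// third_derivatives[q][i][j][k] is the (i,j,k)th third derivative at point q.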

      @@ -2404,7 +2404,7 @@

Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_point-th unit quadrature point.

      For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
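For example, continuing the sketch above (update_JxW_values is among the flags), the integral of the field over the current cell can be approximated by:

double cell_integral = 0;
for (unsigned int q = 0; q < quadrature.size(); ++q)
  cell_integral += values[q] * fe_values.JxW(q);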
      @@ -2461,7 +2461,7 @@
Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
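A corresponding sketch (update_jacobians must be among the flags; here spacedim equals dim):

const DerivativeForm<1, dim, dim> &J = fe_values.jacobian(q);
// J[i][j] is dx_i / d(hat x)_j at quadrature point q.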
      @@ -2519,7 +2519,7 @@
Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2577,7 +2577,7 @@
Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2635,7 +2635,7 @@
Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2693,8 +2693,8 @@
Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijkl}=\frac{d^2J_{iJ}}{d\hat x_K d\hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html 2024-12-27 18:24:55.936785373 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html 2024-12-27 18:24:55.936785373 +0000 @@ -520,11 +520,11 @@

      Detailed Description

      template<int dim, int spacedim = dim>
class FESystem< dim, spacedim >

This class provides an interface to group several elements together into one, vector-valued element. As an example, consider the Taylor-Hood element that is used for the solution of the Stokes and Navier-Stokes equations: there, the velocity (of which there are as many components as the dimension $d$ of the domain) is discretized with $Q_2$ elements and the pressure with $Q_1$ elements. Mathematically, the finite element space for the coupled problem is then often written as $V_h = Q_2^d \times Q_1$ where the exponentiation is understood to be the tensor product of spaces – i.e., in 2d, we have $V_h=Q_2\times Q_2\times Q_1$ – and tensor products lead to vectors where each component of the vector-valued function space corresponds to a scalar function in one of the $Q_2$ or $Q_1$ spaces. Using the FESystem class, this space is created using

FESystem<dim> taylor_hood_fe (FE_Q<dim>(2)^dim,  // velocity components
                              FE_Q<dim>(1));     // pressure component
The creation of this element here corresponds to taking tensor-product powers of the $Q_2$ element in the first line of the list of arguments to the FESystem constructor, and then concatenation via another tensor product with the element in the second line. This kind of construction is used, for example, in the step-22 tutorial program.

      Similarly, step-8 solves an elasticity equation where we need to solve for the displacement of a solid object. The displacement again has $d$ components if the domain is $d$-dimensional, and so the combined finite element is created using

      FESystem<dim> displacement_fe (FE_Q<dim>(1)^dim);

      where now each (vector) component of the combined element corresponds to a $Q_1$ space.

      To the outside world, FESystem objects look just like a usual finite element object, they just happen to be composed of several other finite elements that are possibly of different type. These "base elements" can themselves have multiple components and, in particular, could also be vector-valued – for example, if one of the base elements is an FESystem itself (see also below). An example is given in the documentation of namespace FETools::Compositing, when using the "tensor product" strategy.

      @@ -3817,7 +3817,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3919,7 +3919,7 @@
scalar  An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -4123,8 +4123,8 @@
component_mask  The mask that selects individual components of the finite element
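A small sketch of this conversion, using the Taylor-Hood element taylor_hood_fe from further above (its dim velocity components form complete blocks, so the conversion succeeds):

const FEValuesExtractors::Vector velocities(0);
const ComponentMask velocity_components =
  taylor_hood_fe.component_mask(velocities);
const BlockMask velocity_blocks =
  taylor_hood_fe.block_mask(velocity_components);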

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located at a line bounding a cell but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html 2024-12-27 18:24:56.044786114 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html 2024-12-27 18:24:56.044786114 +0000 @@ -761,7 +761,7 @@

      If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

      Parameters
i        Number of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
q_point  Number of the quadrature point at which the function is to be evaluated
      @@ -800,7 +800,7 @@

      Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

      Parameters
i          Number of the shape function $\varphi_i$ to be evaluated.
q_point    Number of the quadrature point at which the function is to be evaluated.
component  Vector component to be evaluated.
      @@ -837,7 +837,7 @@

      The same holds for the arguments of this function as for the shape_value() function.

      Parameters
i        Number of the shape function $\varphi_i$ to be evaluated.
q_point  Number of the quadrature point at which the function is to be evaluated.
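As an illustration of how shape_grad() is typically combined with JxW(), here is a sketch of assembling the local Laplace matrix (fe_values is assumed to have been constructed with update_gradients | update_JxW_values, as in the sketches further above):

FullMatrix<double> local_matrix(fe.n_dofs_per_cell(), fe.n_dofs_per_cell());
for (unsigned int q = 0; q < quadrature.size(); ++q)
  for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
    for (unsigned int j = 0; j < fe.n_dofs_per_cell(); ++j)
      local_matrix(i, j) += fe_values.shape_grad(i, q) *
                            fe_values.shape_grad(j, q) *
                            fe_values.JxW(q);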
      @@ -1031,11 +1031,11 @@
      Parameters
[in]   fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out]  values       The values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
Postcondition
values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 487 of file fe_values_base.cc.

      @@ -1065,7 +1065,7 @@

      This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 505 of file fe_values_base.cc.

      @@ -1226,11 +1226,11 @@
      Parameters
[in]   fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out]  gradients    The gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
Postcondition
gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1262,7 +1262,7 @@

      This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 683 of file fe_values_base.cc.

      @@ -1369,11 +1369,11 @@
      Parameters
[in]   fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out]  hessians     The Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
Postcondition
hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1410,7 +1410,7 @@

      This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 786 of file fe_values_base.cc.

      @@ -1517,11 +1517,11 @@
      Parameters
[in]   fe_function  A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out]  laplacians   The Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
Postcondition
laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
      For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
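      A hedged sketch of the scalar call, reusing the fe_values and quadrature objects from the earlier sketch; it merely illustrates the trace relation stated above:

      std::vector<double> laplacians(quadrature.size());
      // update_hessians is the flag that enables this call, since the
      // Laplacian is computed as the trace of the Hessian.
      fe_values.get_function_laplacians(solution, laplacians);
      // laplacians[q] == trace(hessians[q]) up to roundoff.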
      @@ -1555,7 +1555,7 @@

      This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
      For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1702,11 +1702,11 @@
      Parameters
[in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] third_derivatives: The third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
Postcondition
third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1743,7 +1743,7 @@

      This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1006 of file fe_values_base.cc.
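      And the corresponding sketch for the scalar overload documented above (same assumed names as in the earlier sketches):

      std::vector<dealii::Tensor<3, dim>> third_derivatives(quadrature.size());
      // Requires update_3rd_derivatives among the UpdateFlags.
      fe_values.get_function_third_derivatives(solution, third_derivatives);
      // third_derivatives[q][i][j][k] is the (i,j,k) component at point q.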

      @@ -2058,7 +2058,7 @@

Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_point-th unit quadrature point.

      For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
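      A sketch of JxW() acting as the measure $dx$ in a quadrature sum, here the squared L2 norm of the solution on one cell; update_values | update_JxW_values are assumed among the flags, and the names are the placeholders used above:

      std::vector<double> values(quadrature.size());
      fe_values.get_function_values(solution, values);
      double cell_norm_sqr = 0;
      for (unsigned int q = 0; q < quadrature.size(); ++q)
        cell_norm_sqr += values[q] * values[q] * fe_values.JxW(q);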
      @@ -2111,7 +2111,7 @@
Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
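      A sketch of reading the Jacobian documented above (update_jacobians assumed; spacedim == dim in this illustration):

      for (unsigned int q = 0; q < quadrature.size(); ++q)
        {
          const dealii::DerivativeForm<1, dim, dim> &J = fe_values.jacobian(q);
          // J[i][j] = dx_i / dxhat_j at quadrature point q; its determinant
          // times the quadrature weight is what JxW(q) returns on cells.
        }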
      @@ -2165,7 +2165,7 @@
Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2219,7 +2219,7 @@
Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2273,7 +2273,7 @@
Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2327,8 +2327,8 @@
Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijkl}=\frac{d^2J_{iJ}}{d\hat x_K d\hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html 2024-12-27 18:24:56.144786801 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html 2024-12-27 18:24:56.148786828 +0000 @@ -650,7 +650,7 @@

      If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

      Parameters
i: Number of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
q_point: Number of the quadrature point at which the function is to be evaluated.
      @@ -684,7 +684,7 @@

      Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

      Parameters
i: Number of the shape function $\varphi_i$ to be evaluated.
q_point: Number of the quadrature point at which the function is to be evaluated.
component: Vector component to be evaluated.
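      A sketch of shape_value() in the usual mass-matrix assembly loop (update_values | update_JxW_values assumed; for non-primitive elements one would call shape_value_component() with an explicit component instead):

      dealii::FullMatrix<double> cell_matrix(fe_values.dofs_per_cell,
                                             fe_values.dofs_per_cell);
      for (unsigned int q = 0; q < quadrature.size(); ++q)
        for (unsigned int i = 0; i < fe_values.dofs_per_cell; ++i)
          for (unsigned int j = 0; j < fe_values.dofs_per_cell; ++j)
            cell_matrix(i, j) += fe_values.shape_value(i, q) *  // phi_i(x_q)
                                 fe_values.shape_value(j, q) *  // phi_j(x_q)
                                 fe_values.JxW(q);              // dx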
      @@ -716,7 +716,7 @@

      The same holds for the arguments of this function as for the shape_value() function.

      Parameters
i: Number of the shape function $\varphi_i$ to be evaluated.
q_point: Number of the quadrature point at which the function is to be evaluated.
      @@ -882,11 +882,11 @@
      Parameters
[in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] values: The values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
Postcondition
values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 716 of file fe_values_base.cc.

@@ -913,7 +913,7 @@ std::vector< Vector< Number > > & values

      This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 763 of file fe_values_base.cc.
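      For the vector-valued overload just described, a minimal sketch (fe is assumed to have fe.n_components() > 1; other names as before):

      std::vector<dealii::Vector<double>> values(
        quadrature.size(), dealii::Vector<double>(fe.n_components()));
      fe_values.get_function_values(solution, values);
      // values[q](c) is component c of the field at quadrature point q.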

      @@ -1061,11 +1061,11 @@
      Parameters
[in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] gradients: The gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
Postcondition
gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
@@ -1094,7 +1094,7 @@ std::vector< std::vector< Tensor< 1, spacedim, Number > > > & gradients

      This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 898 of file fe_values_base.cc.
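      A sketch for the scalar get_function_gradients() documented above (update_gradients assumed; names as before):

      std::vector<dealii::Tensor<1, dim>> gradients(quadrature.size());
      fe_values.get_function_gradients(solution, gradients);
      // gradients[q][d] is the derivative in direction d at point q.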

      @@ -1192,11 +1192,11 @@
      Parameters
[in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] hessians: The Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
Postcondition
hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
@@ -1230,7 +1230,7 @@ const bool quadrature_points_fastest = false

      This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1001 of file fe_values_base.cc.

      @@ -1328,11 +1328,11 @@
      Parameters
[in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] laplacians: The Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
Postcondition
laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
      For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
@@ -1363,7 +1363,7 @@ std::vector< Vector< Number > > & laplacians

      This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
      For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1497,11 +1497,11 @@
      Parameters
[in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] third_derivatives: The third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
Postcondition
third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
@@ -1535,7 +1535,7 @@ const bool quadrature_points_fastest = false

      This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1234 of file fe_values_base.cc.

      @@ -1800,7 +1800,7 @@

Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_point-th unit quadrature point.

      For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1841,7 +1841,7 @@
Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1883,7 +1883,7 @@
Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1925,7 +1925,7 @@
Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1967,7 +1967,7 @@
Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2009,8 +2009,8 @@
Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijkl}=\frac{d^2J_{iJ}}{d\hat x_K d\hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1RenumberedView.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1RenumberedView.html 2024-12-27 18:24:56.184787075 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1RenumberedView.html 2024-12-27 18:24:56.192787130 +0000 @@ -377,7 +377,7 @@

      Return the values of the underlying view characterized by fe_function at the renumbered quadrature points.

      This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected view.

The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -446,7 +446,7 @@

      Return the gradients of the underlying view characterized by fe_function at the renumbered quadrature points.

      This function is the equivalent of the FEValuesBase::get_function_gradients function but it only works on the selected view.

The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., value_type) times the type used to store the gradients of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 2024-12-27 18:24:56.228787378 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 2024-12-27 18:24:56.236787433 +0000 @@ -708,7 +708,7 @@

      Return the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected scalar component.

The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 361 of file fe_values_views.cc.
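      A sketch of the view mechanism this page describes, extracting one scalar component of a vector-valued element; the component index dim, typical for a pressure in a Stokes-like element, is an assumption:

      const dealii::FEValuesExtractors::Scalar pressure(dim);
      std::vector<double> pressure_values(quadrature.size());
      // update_values must be set for the underlying FEValues object.
      fe_values[pressure].get_function_values(solution, pressure_values);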

      @@ -781,7 +781,7 @@

      Return the gradients of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      This function is the equivalent of the FEValuesBase::get_function_gradients function but it only works on the selected scalar component.

The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 412 of file fe_values_views.cc.

      @@ -840,7 +840,7 @@

      Return the Hessians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      This function is the equivalent of the FEValuesBase::get_function_hessians function but it only works on the selected scalar component.

The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 464 of file fe_values_views.cc.

      @@ -899,7 +899,7 @@

      Return the Laplacians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called. The Laplacians are the trace of the Hessians.

      This function is the equivalent of the FEValuesBase::get_function_laplacians function but it only works on the selected scalar component.

The data type stored by the output vector must be what you get when you multiply the Laplacians of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 516 of file fe_values_views.cc.

      @@ -958,7 +958,7 @@

      Return the third derivatives of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      This function is the equivalent of the FEValuesBase::get_function_third_derivatives function but it only works on the selected scalar component.

The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 568 of file fe_values_views.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html 2024-12-27 18:24:56.268787653 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html 2024-12-27 18:24:56.272787680 +0000 @@ -168,9 +168,9 @@

      Detailed Description

      template<int dim, int spacedim>
      class FEValuesViews::SymmetricTensor< 2, dim, spacedim >

      A class representing a view to a set of (dim*dim + dim)/2 components forming a symmetric second-order tensor from a vector-valued finite element. Views are discussed in the Handling vector valued problems topic.

This class allows one to query the value and divergence of (components of) shape functions and solutions representing symmetric tensors. The divergence of a symmetric tensor $S_{ij}, 0\le i,j<\text{dim}$ is defined as $d_i = \sum_j \frac{\partial S_{ij}}{\partial x_j}, 0\le i<\text{dim}$, which due to the symmetry of the tensor is also $d_i = \sum_j \frac{\partial S_{ji}}{\partial x_j}$. In other words, due to the symmetry of $S$ it does not matter whether we apply the nabla operator by row or by column to get the divergence.

      You get an object of this type if you apply a FEValuesExtractors::SymmetricTensor to an FEValues, FEFaceValues or FESubfaceValues object.

      Definition at line 1315 of file fe_values_views.h.
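      A sketch of querying the divergence defined above through this view; the first_component argument 0 is an assumption about where the tensor block starts in the element, and the other names are the placeholders used earlier:

      const dealii::FEValuesExtractors::SymmetricTensor<2> stress(0);
      std::vector<dealii::Tensor<1, dim>> divergences(quadrature.size());
      // update_gradients must be set, as the divergence note below explains.
      fe_values[stress].get_function_divergences(solution, divergences);
      // divergences[q][i] = sum_j dS_ij/dx_j at quadrature point q.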

      @@ -497,7 +497,7 @@

      Return the values of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected vector components.

The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1049 of file fe_values_views.cc.

      @@ -571,7 +571,7 @@

      Return the divergence of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      There is no equivalent function such as FEValuesBase::get_function_divergences in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

      See the general discussion of this class for a definition of the divergence.

The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1099 of file fe_values_views.cc.
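
      Under the same placeholder assumptions as in the previous sketch, the divergence variant looks as follows:

  #include <deal.II/fe/fe_values.h>
  #include <deal.II/lac/vector.h>
  #include <vector>

  using namespace dealii;

  // divergence_type of the symmetric-tensor view is Tensor<1,dim>;
  // update_gradients must have been requested for the FEValues object.
  template <int dim>
  void extract_tensor_divergences(const FEValues<dim>  &fe_values,
                                  const Vector<double> &solution)
  {
    const FEValuesExtractors::SymmetricTensor<2> stress(0);
    std::vector<Tensor<1, dim>> divergences(fe_values.n_quadrature_points);
    fe_values[stress].get_function_divergences(solution, divergences);
  }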

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html 2024-12-27 18:24:56.304787900 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html 2024-12-27 18:24:56.304787900 +0000 @@ -181,8 +181,8 @@

      Detailed Description

      template<int dim, int spacedim>
      class FEValuesViews::Tensor< 2, dim, spacedim >

      A class representing a view to a set of dim*dim components forming a second-order tensor from a vector-valued finite element. Views are discussed in the Handling vector valued problems topic.

      -

      This class allows one to query the value, gradient and divergence of (components of) shape functions and solutions representing tensors. The divergence of a tensor $T_{ij},\, 0\le i,j<\text{dim}$ is defined as $d_i =
-\sum_j \frac{\partial T_{ij}}{\partial x_j}, \, 0\le i<\text{dim}$, whereas its gradient is $G_{ijk} = \frac{\partial T_{ij}}{\partial x_k}$.

      +

      This class allows one to query the value, gradient and divergence of (components of) shape functions and solutions representing tensors. The divergence of a tensor $T_{ij},\, 0\le i,j<\text{dim}$ is defined as $d_i =
+\sum_j \frac{\partial T_{ij}}{\partial x_j}, \, 0\le i<\text{dim}$, whereas its gradient is $G_{ijk} = \frac{\partial T_{ij}}{\partial x_k}$.

      You get an object of this type if you apply a FEValuesExtractors::Tensor to an FEValues, FEFaceValues or FESubfaceValues object.

      Definition at line 1624 of file fe_values_views.h.
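
      A hypothetical sketch of the three queries this view offers; the component offset 0 and the indices i (shape function) and q (quadrature point) are assumptions:

  #include <deal.II/fe/fe_values.h>

  using namespace dealii;

  template <int dim>
  void query_tensor_view(const FEValues<dim> &fe_values,
                         const unsigned int   i,
                         const unsigned int   q)
  {
    const FEValuesExtractors::Tensor<2> field(0);

    const Tensor<2, dim> value      = fe_values[field].value(i, q);      // T_ij
    const Tensor<1, dim> divergence = fe_values[field].divergence(i, q); // d_i
    const Tensor<3, dim> gradient   = fe_values[field].gradient(i, q);   // G_ijk
    (void)value;
    (void)divergence;
    (void)gradient;
  }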

      @@ -603,7 +603,7 @@

      Return the values of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected vector components.

      -

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1153 of file fe_values_views.cc.

      @@ -677,7 +677,7 @@

      Return the divergence of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      There is no equivalent function such as FEValuesBase::get_function_divergences in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

      See the general discussion of this class for a definition of the divergence.

      -

      The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1203 of file fe_values_views.cc.

      @@ -736,7 +736,7 @@

      Return the gradient of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      See the general discussion of this class for a definition of the gradient.

      -

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1256 of file fe_values_views.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html 2024-12-27 18:24:56.352788230 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html 2024-12-27 18:24:56.356788257 +0000 @@ -236,8 +236,8 @@
      template<int dim, int spacedim = dim>
      class FEValuesViews::Vector< dim, spacedim >

      A class representing a view to a set of spacedim components forming a vector part of a vector-valued finite element. Views are discussed in the Handling vector valued problems topic.

      Note that in the current context, a vector is meant in the sense physics uses it: it has spacedim components that behave in specific ways under coordinate system transformations. Examples include velocity or displacement fields. This is opposed to how mathematics uses the word "vector" (and how we use this word in other contexts in the library, for example in the Vector class), where it really stands for a collection of numbers. An example of this latter use of the word could be the set of concentrations of chemical species in a flame; however, these are really just a collection of scalar variables, since they do not change if the coordinate system is rotated, unlike the components of a velocity vector, and consequently this class should not be used in this context.

      -

      This class allows one to query the value, gradient and divergence of (components of) shape functions and solutions representing vectors. The gradient of a vector $d_{k}, 0\le k<\text{dim}$ is defined as $S_{ij} =
-\frac{\partial d_{i}}{\partial x_j}, 0\le i,j<\text{dim}$.

      +

      This class allows one to query the value, gradient and divergence of (components of) shape functions and solutions representing vectors. The gradient of a vector $d_{k}, 0\le k<\text{dim}$ is defined as $S_{ij} =
+\frac{\partial d_{i}}{\partial x_j}, 0\le i,j<\text{dim}$.

      You get an object of this type if you apply a FEValuesExtractors::Vector to an FEValues, FEFaceValues or FESubfaceValues object.

      Definition at line 597 of file fe_values_views.h.
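
      A hypothetical sketch of the basic queries on such a view; the velocity field is assumed to occupy components 0..dim-1 of the element, with i and q as in the earlier sketches:

  #include <deal.II/fe/fe_values.h>

  using namespace dealii;

  template <int dim>
  void query_vector_view(const FEValues<dim> &fe_values,
                         const unsigned int   i,
                         const unsigned int   q)
  {
    const FEValuesExtractors::Vector velocities(0);

    const Tensor<1, dim> u      = fe_values[velocities].value(i, q);
    const Tensor<2, dim> grad_u = fe_values[velocities].gradient(i, q); // S_ij
    const double         div_u  = fe_values[velocities].divergence(i, q);
    (void)u;
    (void)grad_u;
    (void)div_u;
  }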

      @@ -820,7 +820,7 @@ const unsigned int q_point

      Return the symmetric gradient (a symmetric tensor of rank 2) of the vector component selected by this view, for the shape function and quadrature point selected by the arguments.

      The symmetric gradient is defined as $\frac 12 [(\nabla \phi_i(x_q)) +
-(\nabla \phi_i(x_q))^T]$, where $\phi_i$ represents the dim components selected from the FEValuesBase object, and $x_q$ is the location of the $q$-th quadrature point.

      +(\nabla \phi_i(x_q))^T]$, where $\phi_i$ represents the dim components selected from the FEValuesBase object, and $x_q$ is the location of the $q$-th quadrature point.

      Note
      The meaning of the arguments is as documented for the value() function.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
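
      A typical, hypothetical use of this query is the assembly of an elasticity-style bilinear form; update_gradients and update_JxW_values are assumed to have been requested, and cell_matrix is assumed to be properly sized:

  #include <deal.II/fe/fe_values.h>
  #include <deal.II/lac/full_matrix.h>

  using namespace dealii;

  template <int dim>
  void assemble_eps_eps(const FEValues<dim> &fe_values,
                        FullMatrix<double>  &cell_matrix)
  {
    const FEValuesExtractors::Vector velocities(0);
    const unsigned int dofs_per_cell = fe_values.get_fe().n_dofs_per_cell();

    for (unsigned int q = 0; q < fe_values.n_quadrature_points; ++q)
      for (unsigned int i = 0; i < dofs_per_cell; ++i)
        {
          const SymmetricTensor<2, dim> eps_i =
            fe_values[velocities].symmetric_gradient(i, q);
          for (unsigned int j = 0; j < dofs_per_cell; ++j)
            {
              const SymmetricTensor<2, dim> eps_j =
                fe_values[velocities].symmetric_gradient(j, q);
              // Double contraction eps_i : eps_j, weighted by JxW:
              cell_matrix(i, j) += (eps_i * eps_j) * fe_values.JxW(q);
            }
        }
  }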
      @@ -958,7 +958,7 @@

      Return the values of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected vector components.

      -

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 622 of file fe_values_views.cc.

      @@ -1031,7 +1031,7 @@

      Return the gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      This function is the equivalent of the FEValuesBase::get_function_gradients function but it only works on the selected vector components.

      -

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 672 of file fe_values_views.cc.

      @@ -1092,7 +1092,7 @@

      The symmetric gradient of a vector field $\mathbf v$ is defined as $\varepsilon(\mathbf v)=\frac 12 (\nabla \mathbf v + \nabla \mathbf
 v^T)$.

      Note
      There is no equivalent function such as FEValuesBase::get_function_symmetric_gradients in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.
      -

      The data type stored by the output vector must be what you get when you multiply the symmetric gradients of shape functions (i.e., symmetric_gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the symmetric gradients of shape functions (i.e., symmetric_gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 724 of file fe_values_views.cc.
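
      A minimal sketch of this call under the same placeholder assumptions as above; symmetric_gradient_type is SymmetricTensor<2,dim> for a Vector<double> solution vector:

  #include <deal.II/fe/fe_values.h>
  #include <deal.II/lac/vector.h>
  #include <vector>

  using namespace dealii;

  template <int dim>
  void extract_symmetric_gradients(const FEValues<dim>  &fe_values,
                                   const Vector<double> &solution)
  {
    const FEValuesExtractors::Vector velocities(0);
    std::vector<SymmetricTensor<2, dim>> sym_grads(fe_values.n_quadrature_points);
    fe_values[velocities].get_function_symmetric_gradients(solution, sym_grads);
  }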

      @@ -1151,7 +1151,7 @@

      Return the divergence of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      There is no equivalent function such as FEValuesBase::get_function_divergences in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

      -

      The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 778 of file fe_values_views.cc.

      @@ -1210,7 +1210,7 @@

      Return the curl of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      There is no equivalent function such as FEValuesBase::get_function_curls in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

      -

      The data type stored by the output vector must be what you get when you multiply the curls of shape functions (i.e., curl_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the curls of shape functions (i.e., curl_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 831 of file fe_values_views.cc.
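
      A sketch of the curl variant; curl_type hides the dimension dependence (a Tensor<1,1> in 2d, a Tensor<1,3> in 3d), which is why the nested typedef is used for the output vector:

  #include <deal.II/fe/fe_values.h>
  #include <deal.II/lac/vector.h>
  #include <vector>

  using namespace dealii;

  template <int dim>
  void extract_curls(const FEValues<dim>  &fe_values,
                     const Vector<double> &solution)
  {
    const FEValuesExtractors::Vector velocities(0);
    std::vector<typename FEValuesViews::Vector<dim>::curl_type> curls(
      fe_values.n_quadrature_points);
    fe_values[velocities].get_function_curls(solution, curls);
  }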

      @@ -1269,7 +1269,7 @@

      Return the Hessians of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      This function is the equivalent of the FEValuesBase::get_function_hessians function but it only works on the selected vector components.

      -

      The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 883 of file fe_values_views.cc.

      @@ -1328,7 +1328,7 @@

      Return the Laplacians of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called. The Laplacians are the trace of the Hessians.

      This function is the equivalent of the FEValuesBase::get_function_laplacians function but it only works on the selected vector components.

      -

      The data type stored by the output vector must be what you get when you multiply the Laplacians of shape functions (i.e., laplacian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the Laplacians of shape functions (i.e., laplacian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 935 of file fe_values_views.cc.

      @@ -1387,7 +1387,7 @@

      Return the third derivatives of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

      This function is the equivalent of the FEValuesBase::get_function_third_derivatives function but it only works on the selected scalar component.

      -

      The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      +

      The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 995 of file fe_values_views.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html 2024-12-27 18:24:56.496789218 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html 2024-12-27 18:24:56.508789301 +0000 @@ -764,11 +764,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the value returned by the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the value returned by the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -3374,7 +3374,7 @@
      [in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
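
      A hypothetical sketch of the interpolation step described above: sample a function f at the generalized support points of the reference cell, then let the element compute the nodal values $\Psi_i[f]$. The function f is a placeholder for any dealii::Function<dim> with fe.n_components() components:

  #include <deal.II/base/function.h>
  #include <deal.II/fe/fe.h>
  #include <deal.II/lac/vector.h>
  #include <vector>

  using namespace dealii;

  template <int dim>
  std::vector<double> interpolate_on_reference_cell(const FiniteElement<dim> &fe,
                                                    const Function<dim>      &f)
  {
    const std::vector<Point<dim>> &points = fe.get_generalized_support_points();

    // One value vector per generalized support point:
    std::vector<Vector<double>> support_point_values(
      points.size(), Vector<double>(fe.n_components()));
    for (unsigned int p = 0; p < points.size(); ++p)
      f.vector_value(points[p], support_point_values[p]);

    // Apply the node functionals to obtain the expansion coefficients:
    std::vector<double> dof_values(fe.n_dofs_per_cell());
    fe.convert_generalized_support_point_values_to_dof_values(
      support_point_values, dof_values);
    return dof_values;
  }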

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
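
      A minimal sketch of this query; picking component 0 is an assumption for illustration, and for elements where that scalar is only part of a larger block (e.g. FE_RaviartThomas) the call would throw, as noted above:

  #include <deal.II/fe/block_mask.h>
  #include <deal.II/fe/fe.h>
  #include <deal.II/fe/fe_values_extractors.h>

  using namespace dealii;

  template <int dim>
  BlockMask block_of_first_component(const FiniteElement<dim> &fe)
  {
    const FEValuesExtractors::Scalar first_component(0);
    return fe.block_mask(first_component);
  }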
      @@ -3476,7 +3476,7 @@
      scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
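
      A sketch of the ComponentMask-to-BlockMask conversion, continuing the assumptions of the previous block: a vector field at components 0..dim-1 whose components span complete blocks (otherwise, an exception results):

  #include <deal.II/fe/block_mask.h>
  #include <deal.II/fe/component_mask.h>
  #include <deal.II/fe/fe.h>
  #include <deal.II/fe/fe_values_extractors.h>

  using namespace dealii;

  template <int dim>
  BlockMask velocity_blocks(const FiniteElement<dim> &fe)
  {
    const FEValuesExtractors::Vector velocities(0);
    const ComponentMask components = fe.component_mask(velocities);
    return fe.block_mask(components);
  }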
      @@ -3766,8 +3766,8 @@
      component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
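
      A hedged sketch of this query; the function name follows the documentation above, and the further interface of the returned GeometryPrimitive object is deliberately not exercised here:

  #include <deal.II/fe/fe.h>

  using namespace dealii;

  template <int dim>
  void query_dof_association(const FiniteElement<dim> &fe)
  {
    for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
      {
        const GeometryPrimitive primitive =
          fe.get_associated_geometry_primitive(i);
        (void)primitive; // vertex, line, quad, or hex, as described above
      }
  }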
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html 2024-12-27 18:24:56.664790372 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html 2024-12-27 18:24:56.668790400 +0000 @@ -740,11 +740,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the value returned by the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the value returned by the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -3312,7 +3312,7 @@
      [in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3414,7 +3414,7 @@
      scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3704,8 +3704,8 @@
      component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      -

      For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      -

      To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      +

      For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      +

      To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html 2024-12-27 18:24:56.808791361 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html 2024-12-27 18:24:56.816791416 +0000 @@ -740,11 +740,11 @@
      -

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
-$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      -

      In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      -

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the value returned by the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      -

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      +

      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)
+$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      +

      In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      +

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the value returned by the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      +

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -3282,7 +3282,7 @@
      [in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      -
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3384,7 +3384,7 @@
      scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      -
      Note
      This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      +
      Note
      This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3674,8 +3674,8 @@
      component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial whose support point may be physically located on a line bounding a cell, but the corresponding shape function is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
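As a small illustration of this classification, here is a sketch that loops over the degrees of freedom of a $Q_2$ element in 3d and prints the dimension of the associated geometry primitive; it assumes the deal.II calls FiniteElement::get_associated_geometry_primitive() and GeometryPrimitive::get_dimension().

#include <deal.II/fe/fe_q.h>

#include <iostream>

int main()
{
  // Q2 in 3d: dofs sit on vertices, edge midpoints, face centers, and the
  // cell center.
  const dealii::FE_Q<3> fe(2);

  for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
    {
      // get_dimension() is 0 for a vertex, 1 for a line, 2 for a quad,
      // and 3 for a hex, matching the discussion above.
      std::cout << "dof " << i << ": associated with a "
                << fe.get_associated_geometry_primitive(i).get_dimension()
                << "-dimensional object\n";
    }
}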

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html 2024-12-27 18:24:56.960792405 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html 2024-12-27 18:24:56.968792460 +0000 @@ -2384,17 +2384,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
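The subtracted term is simply the chain rule at work; a brief sketch in the notation above (standard calculus, not part of the diff):

\[
 \frac{d^2 \phi_i}{d x_j d x_k}
 = \frac{d}{d x_j} \left( \frac{d \phi_i}{d \hat x_K} (J_{kK})^{-1} \right)
 = \frac{d^2 \phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1}
 + \frac{d \phi_i}{d \hat x_K} \frac{d (J_{kK})^{-1}}{d x_j}.
\]

The first term is exactly $D_{ijk}$; expressing the derivative of the inverse Jacobian through the Jacobian pushed-forward derivative turns the second term into $-H_{mjk} \frac{d \phi_i}{d x_m}$, which is the correction being subtracted.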

      @@ -2427,21 +2427,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivatives would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3518,7 +3518,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3620,7 +3620,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3881,8 +3881,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial whose support point may be physically located on a line bounding a cell, but the corresponding shape function is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3916,11 +3916,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.
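As a concrete usage sketch of the machinery just described (the element and the sampled function are illustrative assumptions), one can evaluate a function at the generalized support points and hand the values to convert_generalized_support_point_values_to_dof_values():

#include <deal.II/base/point.h>
#include <deal.II/fe/fe_raviart_thomas.h>
#include <deal.II/lac/vector.h>

#include <vector>

int main()
{
  constexpr int dim = 2;
  const dealii::FE_RaviartThomas<dim> fe(1);

  // One entry per generalized support point; each entry has as many
  // components as the element (here: dim).
  const std::vector<dealii::Point<dim>> &points =
    fe.get_generalized_support_points();

  std::vector<dealii::Vector<double>> support_point_values(
    points.size(), dealii::Vector<double>(fe.n_components()));

  // Values of the (assumed) function f(x) = x at the support points.
  for (unsigned int p = 0; p < points.size(); ++p)
    for (unsigned int c = 0; c < fe.n_components(); ++c)
      support_point_values[p](c) = points[p][c];

  // Apply the node functionals Psi_i; the results are the expansion
  // coefficients of the interpolant f_h = sum_i Psi_i[f] phi_i.
  std::vector<double> dof_values(fe.n_dofs_per_cell());
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, dof_values);
}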

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html 2024-12-27 18:24:57.108793421 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html 2024-12-27 18:24:57.112793449 +0000 @@ -3156,7 +3156,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3258,7 +3258,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3548,8 +3548,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial whose support point may be physically located on a line bounding a cell, but the corresponding shape function is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3583,11 +3583,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html 2024-12-27 18:24:57.264794493 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html 2024-12-27 18:24:57.260794465 +0000 @@ -3156,7 +3156,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3258,7 +3258,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3548,8 +3548,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial whose support point may be physically located on a line bounding a cell, but the corresponding shape function is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3583,11 +3583,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html 2024-12-27 18:24:57.404795454 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html 2024-12-27 18:24:57.408795481 +0000 @@ -491,24 +491,24 @@

      Detailed Description

      template<int dim, int spacedim = dim>
      class FE_DGP< dim, spacedim >

      Discontinuous finite elements based on Legendre polynomials.

This finite element implements complete polynomial spaces, that is, dim-dimensional polynomials of degree p. For example, in 2d the element FE_DGP(1) would represent the span of the functions $\{1,\hat x,\hat y\}$, which is in contrast to the element FE_DGQ(1) that is formed by the span of $\{1,\hat x,\hat y,\hat x\hat y\}$. Since the DGP space has only three unknowns for each quadrilateral, it is immediately clear that this element can not be continuous.

The basis functions used in this element for the space described above are chosen to form a Legendre basis on the unit square, i.e., in particular they are $L_2$-orthogonal and normalized on the reference cell (but not necessarily on the real cell). As a consequence, the first basis function of this element is always the function that is constant and equal to one, regardless of the polynomial degree of the element. In addition, as a result of the orthogonality of the basis functions, the mass matrix is diagonal if the grid cells are parallelograms. Note that this is in contrast to the FE_DGPMonomial class that actually uses the monomial basis listed above as basis functions, without transformation from reference to real cell.

      The shape functions are defined in the class PolynomialSpace. The polynomials used inside PolynomialSpace are Polynomials::Legendre up to degree p given in FE_DGP. For the ordering of the basis functions, refer to PolynomialSpace, remembering that the Legendre polynomials are ordered by ascending degree.

      Note
      This element is not defined by finding shape functions within the given function space that interpolate a particular set of points. Consequently, there are no support points to which a given function could be interpolated; finding a finite element function that approximates a given function is therefore only possible through projection, rather than interpolation. Secondly, the shape functions of this element do not jointly add up to one. As a consequence of this, adding or subtracting a constant value – such as one would do to make a function have mean value zero – can not be done by simply subtracting the constant value from each degree of freedom. Rather, one needs to use the fact that the first basis function is constant equal to one and simply subtract the constant from the value of the degree of freedom corresponding to this first shape function on each cell.
      This class is only partially implemented for the codimension one case (spacedim != dim ), since no passage of information between meshes of different refinement level is possible because the embedding and projection matrices are not computed in the class constructor.
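A tiny sketch of the dimension counts described above (standard deal.II usage; nothing here is specific to the diff):

#include <deal.II/fe/fe_dgp.h>
#include <deal.II/fe/fe_dgq.h>

#include <iostream>

int main()
{
  // In 2d, FE_DGP(1) spans {1, x, y} (3 dofs per cell), while FE_DGQ(1)
  // spans {1, x, y, xy} (4 dofs per cell).
  const dealii::FE_DGP<2> dgp(1);
  const dealii::FE_DGQ<2> dgq(1);

  std::cout << "FE_DGP(1) in 2d: " << dgp.n_dofs_per_cell()
            << " dofs per cell\n"
            << "FE_DGQ(1) in 2d: " << dgq.n_dofs_per_cell()
            << " dofs per cell\n";
}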

      Transformation properties

It is worth noting that under a (bi-, tri-)linear mapping, the space described by this element does not contain $P(k)$, even if we use a basis of polynomials of degree $k$. Consequently, for example, on meshes with non-affine cells, a linear function can not be exactly represented by elements of type FE_DGP(1) or FE_DGPMonomial(1).

This can be understood by the following 2-d example: consider the cell with vertices at $(0,0),(1,0),(0,1),(s,s)$:

For this cell, a bilinear transformation $F$ produces the relations $x=\hat x+\hat x\hat y$ and $y=\hat y+\hat x\hat y$ that correlate reference coordinates $\hat x,\hat y$ and coordinates in real space $x,y$. Under this mapping, the constant function is clearly mapped onto itself, but the two other shape functions of the $P_1$ space, namely $\phi_1(\hat x,\hat y)=\hat x$ and $\phi_2(\hat x,\hat y)=\hat y$, are mapped onto $\phi_1(x,y)=\frac{x-t}{t(s-1)},\phi_2(x,y)=t$ where $t=\frac{y}{s-x+sx+y-sy}$.

For the simple case that $s=1$, i.e. if the real cell is the unit square, the expressions can be simplified to $t=y$ and $\phi_1(x,y)=x,\phi_2(x,y)=y$. However, for all other cases, the functions $\phi_1(x,y),\phi_2(x,y)$ are not linear any more, and neither is any linear combination of them. Consequently, the linear functions are not within the range of the mapped $P_1$ polynomials.

      Visualization of shape functions

      In 2d, the shape functions of this element look as follows.

[Tables of shape-function plots for the $P_0$, $P_1$, $P_2$, $P_3$, and $P_4$ elements, one panel per shape function.]

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html 2024-12-27 18:24:57.556796498 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html 2024-12-27 18:24:57.564796553 +0000 @@ -504,21 +504,21 @@

      Detailed Description

      template<int dim>
      class FE_DGPMonomial< dim >

      Discontinuous finite elements based on monomials.

This finite element implements complete polynomial spaces, that is, dim-dimensional polynomials of degree p. For example, in 2d the element FE_DGP(1) would represent the span of the functions $\{1,\hat x,\hat y\}$, which is in contrast to the element FE_DGQ(1) that is formed by the span of $\{1,\hat x,\hat y,\hat x\hat y\}$. Since the DGP space has only three unknowns for each quadrilateral, it is immediately clear that this element can not be continuous.

      The basis functions for this element are chosen to be the monomials listed above. Note that this is the main difference to the FE_DGP class that uses a set of polynomials of complete degree p that form a Legendre basis on the unit square. Thus, there, the mass matrix is diagonal, if the grid cells are parallelograms. The basis here does not have this property; however, it is simpler to compute. On the other hand, this element has the additional disadvantage that the local cell matrices usually have a worse condition number than the ones originating from the FE_DGP element.

      This class is not implemented for the codimension one case (spacedim != dim).

      Transformation properties

It is worth noting that under a (bi-, tri-)linear mapping, the space described by this element does not contain $P(k)$, even if we use a basis of polynomials of degree $k$. Consequently, for example, on meshes with non-affine cells, a linear function can not be exactly represented by elements of type FE_DGP(1) or FE_DGPMonomial(1).

This can be understood by the following 2-d example: consider the cell with vertices at $(0,0),(1,0),(0,1),(s,s)$:

For this cell, a bilinear transformation $F$ produces the relations $x=\hat x+\hat x\hat y$ and $y=\hat y+\hat x\hat y$ that correlate reference coordinates $\hat x,\hat y$ and coordinates in real space $x,y$. Under this mapping, the constant function is clearly mapped onto itself, but the two other shape functions of the $P_1$ space, namely $\phi_1(\hat x,\hat y)=\hat x$ and $\phi_2(\hat x,\hat y)=\hat y$, are mapped onto $\phi_1(x,y)=\frac{x-t}{t(s-1)},\phi_2(x,y)=t$ where $t=\frac{y}{s-x+sx+y-sy}$.

For the simple case that $s=1$, i.e. if the real cell is the unit square, the expressions can be simplified to $t=y$ and $\phi_1(x,y)=x,\phi_2(x,y)=y$. However, for all other cases, the functions $\phi_1(x,y),\phi_2(x,y)$ are not linear any more, and neither is any linear combination of them. Consequently, the linear functions are not within the range of the mapped $P_1$ polynomials.

      Visualization of shape functions

      In 2d, the shape functions of this element look as follows.

[Shape function plots: $P_0$ element (shape function 0); $P_1$ element (shape functions 0-2); $P_2$ element (shape functions 0-5); $P_3$ element (shape functions 0-9); $P_4$ element.]

/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html 2024-12-27 18:24:57.708797542 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html 2024-12-27 18:24:57.716797597 +0000
@@ -499,7 +499,7 @@

      Besides, this class is not implemented for the codimension one case (spacedim != dim).

      Visualization of shape functions

      In 2d, the shape functions of this element look as follows.

[Shape function plots: $P_0$ element (shape function 0); $P_1$ element (shape functions 0-2); $P_2$ element (shape functions 0-5); $P_3$ element (shape functions 0-9); $P_4$ element (shape functions 0-3).]

/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html 2024-12-27 18:24:57.852798531 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html 2024-12-27 18:24:57.856798558 +0000
@@ -530,7 +530,7 @@

[node-numbering diagram of the degree-2 element in 3d not reproduced in this text diff]

      with node 13 being placed in the interior of the hex.

      Note, however, that these are just the Lagrange interpolation points of the shape functions. Even though they may physically be on the boundary of the cell, they are logically in the interior since there are no continuity requirements for these shape functions across cell boundaries. While discontinuous, when restricted to a single cell the shape functions of this element are exactly the same as those of the FE_Q element where they are shown visually.

      Unit support point distribution and conditioning of interpolation

When constructing an FE_DGQ element at polynomial degrees one or two, equidistant support points at 0 and 1 (linear case) or 0, 0.5, and 1 (quadratic case) are used. The unit support or nodal points $x_i$ are those points where the $j$th Lagrange polynomial satisfies the $\delta_{ij}$ property, i.e., where one polynomial is one and all the others are zero. For higher polynomial degrees, the support points are non-equidistant by default, and chosen to be the support points of the (degree+1)-order Gauss-Lobatto quadrature rule. This point distribution yields well-conditioned Lagrange interpolation at arbitrary polynomial degrees. By contrast, polynomials based on equidistant points become increasingly ill-conditioned as the polynomial degree increases. In interpolation, this effect is known as the Runge phenomenon. For Galerkin methods, the Runge phenomenon is typically not visible in the solution quality but rather in the condition number of the associated system matrices. For example, the elemental mass matrix of equidistant points at degree 10 has condition number 2.6e6, whereas the condition number for Gauss-Lobatto points is around 400.

The Gauss-Lobatto points in 1d include the end points 0 and 1 of the unit interval. The interior points are shifted towards the end points, which gives a denser point distribution close to the element boundary.
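As a sketch of how the two point distributions can be compared in practice (assuming the deal.II 9.x headers; the equidistant variant uses the FE_DGQArbitraryNodes class documented further below):

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/fe/fe_dgq.h>

using namespace dealii;

int main()
{
  // Default FE_DGQ at degree 10: the support points are the 11-point
  // Gauss-Lobatto points, which keep the Lagrange basis well conditioned.
  const FE_DGQ<2> fe_gauss_lobatto(10);

  // Equidistant alternative for comparison: FE_DGQArbitraryNodes accepts
  // any 1d point set; iterating the two-point trapezoidal rule ten times
  // yields 11 equidistant points on [0,1].
  const FE_DGQArbitraryNodes<2> fe_equidistant(
    QIterated<1>(QTrapezoid<1>(), 10));
}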

      Definition at line 111 of file fe_dgq.h.

      @@ -2294,17 +2294,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2337,21 +2337,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.
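In index notation both corrections are plain tensor contractions. The following sketch shows the Hessian case with a hypothetical flat data layout (deal.II's internal storage differs); the third-derivative correction follows the same pattern with one extra index:

#include <array>
#include <cstddef>
#include <vector>

constexpr unsigned int dim = 3;

using Grad    = std::array<double, dim>;                  // d phi_i / d x_m
using Hessian = std::array<std::array<double, dim>, dim>; // second derivatives

// Apply D_{ijk} <- D_{ijk} - H_{mjk} * d phi_i / d x_m for every shape
// function i, i.e. the Hessian correction quoted above.
void correct_shape_hessians(std::vector<Hessian>           &D,
                            const std::vector<Grad>        &grad,
                            const std::array<Hessian, dim> &H) // H[m][j][k]
{
  for (std::size_t i = 0; i < D.size(); ++i)
    for (unsigned int j = 0; j < dim; ++j)
      for (unsigned int k = 0; k < dim; ++k)
        for (unsigned int m = 0; m < dim; ++m)
          D[i][j][k] -= H[m][j][k] * grad[i][m];
}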

      @@ -3481,7 +3481,7 @@
[Shape function plots: $P_4$ element, shape functions 0-7.]

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3583,7 +3583,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3844,8 +3844,8 @@
component_mask: The mask that selects individual components of the finite element
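A sketch of how the two mask conversions behave on a mixed element (the element combination and variable names are illustrative):

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/component_mask.h>
#include <deal.II/fe/fe_dgq.h>
#include <deal.II/fe/fe_raviart_thomas.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

using namespace dealii;

int main()
{
  // A mixed element: one vector-valued Raviart-Thomas block (components
  // 0 and 1 in 2d) followed by one scalar DGQ block (component 2).
  const FESystem<2> fe(FE_RaviartThomas<2>(1), 1, FE_DGQ<2>(1), 1);

  // Selecting the whole RT vector field covers a complete block, so the
  // conversion to a block mask succeeds:
  const FEValuesExtractors::Vector flux(0);
  const BlockMask ok = fe.block_mask(fe.component_mask(flux));

  // Selecting only the x-component of the RT field covers part of a
  // block; the conversion would then throw an exception:
  //   const FEValuesExtractors::Scalar vx(0);
  //   fe.block_mask(vx); // error: component 0 is not a complete block
  (void)ok;
}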

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
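A sketch of querying this association (assuming the function in question is FiniteElement::get_associated_geometry_primitive(), as in recent deal.II releases):

#include <deal.II/base/geometry_info.h>
#include <deal.II/fe/fe_q.h>

using namespace dealii;

int main()
{
  // Q2 in 3d: DoFs sit at vertices, edge midpoints, face centers and the
  // cell center, so the returned primitive varies with the DoF index.
  const FE_Q<3> fe(2);
  for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
    {
      const GeometryPrimitive p = fe.get_associated_geometry_primitive(i);
      (void)p; // GeometryPrimitive::vertex/line/quad/hex, as described above
    }
}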
/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html 2024-12-27 18:24:57.992799492 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html 2024-12-27 18:24:58.000799547 +0000
@@ -2188,17 +2188,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2231,21 +2231,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3375,7 +3375,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3477,7 +3477,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3738,8 +3738,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html 2024-12-27 18:24:58.144800536 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html 2024-12-27 18:24:58.144800536 +0000
@@ -2190,17 +2190,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2233,21 +2233,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3377,7 +3377,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3479,7 +3479,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3740,8 +3740,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html 2024-12-27 18:24:58.296801580 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html 2024-12-27 18:24:58.288801525 +0000
@@ -2190,17 +2190,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2233,21 +2233,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3377,7 +3377,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3479,7 +3479,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3740,8 +3740,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html 2024-12-27 18:24:58.436802541 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html 2024-12-27 18:24:58.444802596 +0000
@@ -3156,7 +3156,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3258,7 +3258,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3548,8 +3548,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3583,11 +3583,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
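A sketch of this interpolation step for the lowest-order Raviart-Thomas element, assuming the member in question is FiniteElement::convert_generalized_support_point_values_to_dof_values(), consistent with the support_point_values parameter listed below; the sample function $f=(y,-x)$ is purely illustrative:

#include <deal.II/base/point.h>
#include <deal.II/fe/fe_raviart_thomas.h>
#include <deal.II/lac/vector.h>

#include <vector>

using namespace dealii;

int main()
{
  const FE_RaviartThomas<2> fe(0);

  // Evaluate the sample function f(x,y) = (y, -x) at the generalized
  // support points x_hat_i of the reference cell ...
  const std::vector<Point<2>> &points = fe.get_generalized_support_points();
  std::vector<Vector<double>> support_point_values(
    points.size(), Vector<double>(fe.n_components()));
  for (unsigned int i = 0; i < points.size(); ++i)
    {
      support_point_values[i][0] = points[i][1];
      support_point_values[i][1] = -points[i][0];
    }

  // ... and apply the node functionals to obtain the nodal values
  // Psi_i[f], i.e. the expansion coefficients of the interpolant f_h:
  std::vector<double> dof_values(fe.dofs_per_cell);
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, dof_values);
}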
/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html 2024-12-27 18:24:58.588803585 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html 2024-12-27 18:24:58.600803668 +0000
@@ -3173,7 +3173,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3275,7 +3275,7 @@
scalar: An object that represents a single scalar vector component of this finite element.
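
As a hedged illustration (the Stokes-like FESystem below is an assumption chosen so that the call succeeds; it is not taken from this documentation):

// dim = 2 velocity components (FE_Q(2)) plus one pressure component (FE_Q(1)):
FESystem<2> fe(FE_Q<2>(2), 2, FE_Q<2>(1), 1);
FEValuesExtractors::Scalar pressure(2); // component index dim = 2
// Succeeds because the pressure component coincides with one block of fe:
const BlockMask pressure_blocks = fe.block_mask(pressure);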

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3565,8 +3565,8 @@
component_mask: The mask that selects individual components of the finite element.
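
A sketch of the ComponentMask-to-BlockMask conversion, using the same assumed FESystem as in the previous sketch:

FESystem<2> fe(FE_Q<2>(2), 2, FE_Q<2>(1), 1);
std::vector<bool> selected(fe.n_components(), false);
selected[2] = true; // select only the pressure component
const BlockMask pressure_blocks = fe.block_mask(ComponentMask(selected));
// For an element whose blocks span several components (e.g. FE_RaviartThomas),
// selecting a single component would instead trigger the exception noted above.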

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.


For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example: for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as to the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above: for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, and not only on the cells that share the adjacent faces; consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, even though the corresponding shape function is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
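
A short sketch of querying this association via FiniteElement::get_associated_geometry_primitive() (the element and the output format are assumptions for illustration):

FE_Q<3> fe(2);
for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
  {
    const GeometryPrimitive primitive = fe.get_associated_geometry_primitive(i);
    // dimension 0 = vertex, 1 = line, 2 = quad, 3 = hex
    std::cout << "DoF " << i << " is logically associated with an object of dimension "
              << primitive.get_dimension() << std::endl;
  }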

      Parameters
      @@ -3600,11 +3600,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes the nodal values of the element, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements we have $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of FiniteElement::system_to_component_index(). In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html 2024-12-27 18:24:58.740804629 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html 2024-12-27 18:24:58.744804656 +0000 @@ -500,12 +500,12 @@

      Detailed Description

      template<int dim, int spacedim = dim>
      class FE_Enriched< dim, spacedim >

Implementation of a partition of unity finite element method (PUM) by Babuska and Melenk, which enriches a standard finite element with an enrichment function multiplied by another (usually linear) finite element:

\[
U(\mathbf x) = \sum_i N_i(\mathbf x) U_i + \sum_j N_j(\mathbf x) \sum_k
F_k(\mathbf x) U_{jk}
\]


where $N_i(\mathbf x)$ and $N_j(\mathbf x)$ are the underlying finite elements (including the mapping from the isoparametric element to the real element); $F_k(\mathbf x)$ are the scalar enrichment functions in real space (e.g. $1/r$, $\exp(-r)$, etc.); and $U_i$ and $U_{jk}$ are the standard and enriched DoFs. This makes it possible to include in the finite element space a priori knowledge about the partial differential equation being solved, which in turn improves the local approximation properties of the space. This can be useful for highly oscillatory solutions, problems with domain corners, unbounded domains, or sudden changes of boundary conditions. The PUM uses finite element spaces that satisfy the partition of unity property (e.g. FE_Q). Among other properties, this makes the resulting space reproduce the enrichment functions exactly.

      The simplest constructor of this class takes two finite element objects and an enrichment function to be used. For example

      @@ -513,7 +513,7 @@

In this case, standard DoFs are distributed by FE_Q<dim>(2), whereas enriched DoFs come from a single finite element FE_Q<dim>(1) used with a single enrichment function function. The total number of DoFs on the enriched element is then the sum of the DoFs from FE_Q<dim>(2) and FE_Q<dim>(1).
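
The constructor call referenced by "For example" above does not survive in this diff; a minimal sketch of what it presumably looks like (here function stands for a user-provided Function<dim> enrichment object and is an assumption):

FE_Enriched<dim> fe(FE_Q<dim>(2), // distributes the standard DoFs
                    FE_Q<dim>(1), // multiplied by the enrichment function
                    &function);   // pointer to the enrichment Function<dim>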


As an example of an enrichment function, consider $\exp(-x)$, which leads to the following shape functions on the unit element:

      @@ -526,7 +526,7 @@
(Figures: 1d element, base and enriched shape functions; enriched shape function corresponding to the central vertex.)

      Note that evaluation of gradients (hessians) of the enriched shape functions or the finite element field requires evaluation of gradients (gradients and hessians) of the enrichment functions:

\begin{align*}
U(\mathbf x)
&= \sum_i N_i(\mathbf x) U_i
+ \sum_{j,k} N_j(\mathbf x) F_k(\mathbf x) U_{jk} \\
\mathbf \nabla U(\mathbf x)
&= \sum_i \mathbf \nabla N_i(\mathbf x) U_i
+ \sum_{j,k} \left[ \mathbf \nabla N_j(\mathbf x) F_k(\mathbf x)
+ N_j(\mathbf x) \mathbf \nabla F_k(\mathbf x) \right] U_{jk} \\
\mathbf \nabla \mathbf \nabla U(\mathbf x)
&= \sum_i \mathbf \nabla \mathbf \nabla N_i(\mathbf x) U_i
+ \sum_{j,k} \left[ \mathbf \nabla \mathbf \nabla N_j(\mathbf x)
F_k(\mathbf x) + \mathbf \nabla F_k(\mathbf x) \mathbf \nabla N_j(\mathbf x)
+ \mathbf \nabla N_j(\mathbf x) \mathbf \nabla F_k(\mathbf x) + N_j(\mathbf
x) \mathbf \nabla \mathbf \nabla F_k(\mathbf x) \right] U_{jk}
\end{align*}

      Using enriched and non-enriched FEs together


In most applications it is beneficial to introduce enrichments only in some part of the domain (e.g. around a crack tip) and to use a standard FE (e.g. FE_Q) elsewhere. This can be achieved by using the hp-finite element framework in deal.II, which allows for the use of different elements on different cells. To make the resulting space $C^0$ continuous, it is then necessary for the DoFHandler class and the DoFTools::make_hanging_node_constraints() function to be able to figure out what to do at the interface between enriched and non-enriched cells. Specifically, we want the degrees of freedom corresponding to enriched shape functions to be zero at these interfaces. These classes and functions cannot do this automatically, but the effect can be achieved by using, on cells without enrichment, not a plain FE_Q but an FE_Q wrapped into an FE_Enriched object without actually enriching it. This can be done as follows:

FE_Enriched<dim> fe_non_enriched(FE_Q<dim>(1));

      This constructor is equivalent to calling

FE_Enriched<dim> fe_non_enriched(FE_Q<dim>(1),
                                 FE_Nothing<dim>(1, true),
                                 nullptr);
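
In an hp-setting, the enriched and the wrapped non-enriched element would then typically be collected so that different cells can use different elements. A sketch, assuming fe_enriched was constructed as shown earlier:

hp::FECollection<dim> fe_collection;
fe_collection.push_back(fe_enriched);     // cells to be enriched (e.g. near a crack tip)
fe_collection.push_back(fe_non_enriched); // all remaining cells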
      @@ -3233,7 +3233,7 @@

Return a block mask with as many elements as this object has blocks, in which exactly the one entry is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3335,7 +3335,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3625,8 +3625,8 @@
component_mask: The mask that selects individual components of the finite element.

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.


For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example: for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as to the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above: for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, and not only on the cells that share the adjacent faces; consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, even though the corresponding shape function is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3660,11 +3660,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes the nodal values of the element, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements we have $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of FiniteElement::system_to_component_index(). In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html 2024-12-27 18:24:58.888805645 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html 2024-12-27 18:24:58.892805673 +0000 @@ -3240,7 +3240,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return), where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one entry is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3342,7 +3342,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3603,8 +3603,8 @@
component_mask: The mask that selects individual components of the finite element.

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.


For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example: for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as to the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above: for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, and not only on the cells that share the adjacent faces; consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, even though the corresponding shape function is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3638,11 +3638,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes the nodal values of the element, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements we have $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of FiniteElement::system_to_component_index(). In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html 2024-12-27 18:24:59.028806607 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html 2024-12-27 18:24:59.032806634 +0000 @@ -3449,7 +3449,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return), where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one entry is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3551,7 +3551,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3808,8 +3808,8 @@
component_mask: The mask that selects individual components of the finite element.

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.


For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example: for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as to the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above: for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, and not only on the cells that share the adjacent faces; consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, even though the corresponding shape function is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3843,11 +3843,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes the nodal values of the element, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements we have $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of FiniteElement::system_to_component_index(). In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html 2024-12-27 18:24:59.180807651 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html 2024-12-27 18:24:59.176807623 +0000 @@ -3287,7 +3287,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return), where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one entry is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3389,7 +3389,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3650,8 +3650,8 @@
component_mask: The mask that selects individual components of the finite element.

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.


For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example: for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as to the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above: for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, and not only on the cells that share the adjacent faces; consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
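A short sketch of querying this association (the loop and the printed dimension are illustrative; get_associated_geometry_primitive() is the function documented here):

#include <deal.II/base/geometry_info.h>
#include <deal.II/fe/fe_q.h>

#include <iostream>

int main()
{
  using namespace dealii;

  FE_Q<3> fe(2); // Q2 in 3d: vertex, edge, face, and interior DoFs

  // For each local DoF, report the dimension of the lowest-dimensional
  // object (vertex=0, line=1, quad=2, hex=3) it is associated with.
  for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
    std::cout << "DoF " << i << " -> dimension "
              << fe.get_associated_geometry_primitive(i).get_dimension()
              << '\n';
}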

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html 2024-12-27 18:24:59.316808585 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html 2024-12-27 18:24:59.328808667 +0000 @@ -3000,7 +3000,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3102,7 +3102,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3359,8 +3359,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3394,11 +3394,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.
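The call sequence this describes can be sketched as follows (a minimal example; the choice of FE_Q and of the sample function f(x,y)=x+y are assumptions for illustration — for Lagrange elements the node functionals reduce to point evaluation, so the nodal values simply equal the point values):

#include <deal.II/fe/fe_q.h>
#include <deal.II/lac/vector.h>

#include <vector>

int main()
{
  using namespace dealii;

  FE_Q<2> fe(1); // scalar bilinear element: 4 support points

  // Values of f at the (generalized) support points; each entry is a
  // vector with one value per vector component of the element.
  const auto &points = fe.get_generalized_support_points();
  std::vector<Vector<double>> support_point_values(
    points.size(), Vector<double>(fe.n_components()));
  for (unsigned int i = 0; i < points.size(); ++i)
    support_point_values[i][0] = points[i][0] + points[i][1]; // f(x,y)=x+y

  // Apply the node functionals Psi_i to obtain the expansion
  // coefficients of the interpolant f_h.
  std::vector<double> nodal_values(fe.n_dofs_per_cell());
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, nodal_values);
}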

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Hermite.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Hermite.html 2024-12-27 18:24:59.480809711 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Hermite.html 2024-12-27 18:24:59.476809683 +0000 @@ -493,8 +493,8 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Detailed Description

      template<int dim, int spacedim = dim>
class FE_Hermite< dim, spacedim >

This class implements a Hermite interpolation basis of maximum regularity elements (see [CiarletRiavart1972interpolation]). These bases are always of odd polynomial degree, have regularity $r=\frac{p-1}{2}$ and are defined up to polynomial degree $p=13$, with larger degrees currently being ill-conditioned.

Each node has $(r+1)^{d}$ degrees of freedom (DoFs) assigned to it, corresponding to various derivatives up to order $r$ in each direction. DoFs at each node are not consecutive in lexicographic ordering for $d>1$ due to the tensor product construction of the basis. The ordering is determined by the direction of the derivative each function corresponds to; first by $x$-derivatives, then $y$, then $z$. Locally over each element the DoFs are ordered similarly. See below for the local ordering for $r=1$, where DoFs are ordered from 0 to $(2r+2)^{d}-1$:

FE_Hermite<1>(3)

(0)________________(2)
(1)                (3)
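A brief sketch instantiating the element shown above (assumes a deal.II version that ships FE_Hermite, such as the 9.6 release this documentation belongs to):

#include <deal.II/fe/fe_hermite.h>

#include <iostream>

int main()
{
  using namespace dealii;

  // Cubic Hermite basis in 1d: regularity r = (p-1)/2 = 1, so each of
  // the two vertices carries (r+1)^d = 2 DoFs (value and x-derivative),
  // matching the local ordering 0..3 sketched above.
  FE_Hermite<1> fe(3);
  std::cout << fe.get_name() << " has " << fe.n_dofs_per_cell()
            << " DoFs per cell\n"; // prints 4
}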
      @@ -2037,17 +2037,17 @@
       

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians are given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
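For the record, the correction term falls out of the chain rule; a hedged sketch of the derivation (index conventions as in the formulas above, with summation over repeated indices):

\[
\frac{d^2\phi_i}{dx_j\,dx_k}
 = \frac{d}{dx_k}\!\left(\frac{d\phi_i}{d\hat x_J}\,(J_{jJ})^{-1}\right)
 = \underbrace{\frac{d^2\phi_i}{d\hat x_J\,d\hat x_K}(J_{jJ})^{-1}(J_{kK})^{-1}}_{D_{ijk}}
   + \frac{d\phi_i}{d\hat x_J}\,\frac{d(J_{jJ})^{-1}}{dx_k}.
\]

Using $\frac{d(J^{-1})}{dx_k} = -J^{-1}\,\frac{dJ}{dx_k}\,J^{-1}$ together with $\frac{d\phi_i}{d\hat x_J}(J_{mJ})^{-1} = \frac{d\phi_i}{dx_m}$, the second term is exactly $-H_{mjk}\,\frac{d\phi_i}{dx_m}$, which is the quantity the function subtracts.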

      @@ -2080,21 +2080,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivatives are given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3457,7 +3457,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3559,7 +3559,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3849,8 +3849,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3884,11 +3884,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -5180,7 +5180,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
Variable storing the order of the highest derivative that the current FE_Hermite object can enforce continuity for. Here the order of derivative only counts in one spatial direction, so the derivative $\frac{d^{2}f}{dx \; dy}$ would be counted as a first-order derivative of $f$, as an example.

      Definition at line 262 of file fe_hermite.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html 2024-12-27 18:24:59.632810754 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html 2024-12-27 18:24:59.640810809 +0000 @@ -532,12 +532,12 @@

      Detailed Description

      template<int dim>
      class FE_Nedelec< dim >
      Warning
      Several aspects of the implementation are experimental. For the moment, it is safe to use the element on globally refined meshes with consistent orientation of faces. See the todo entries below for more detailed caveats.
Implementation of Nédélec elements. The Nédélec space is designed to solve problems in which the solution only lives in the space $H^\text{curl}=\{ {\mathbf u} \in L_2: \text{curl}\, {\mathbf u} \in L_2\}$, rather than in the more commonly used space $H^1=\{ u \in L_2: \nabla u \in L_2\}$. In other words, the solution must be a vector field whose curl is square integrable, but for which the gradient may not be square integrable. The typical application for this space (and these elements) is to the Maxwell equations and corresponding simplifications, such as the reduced version of the Maxwell equation that only involves the electric field $\mathbf E$ which has to satisfy the equation $\text{curl}\, \text{curl}\, {\mathbf E} = 0$ in the time independent case when no currents are present, or the equation $\text{curl}\,\text{curl}\,{\mathbf A} = 4\pi{\mathbf j}$ that the magnetic vector potential $\mathbf A$ has to satisfy in the time independent case.

The defining characteristic of functions in $H^\text{curl}$ is that they are in general discontinuous – but that if you draw a line in 2d (or a surface in 3d), then the tangential component(s) of the vector field must be continuous across the line (or surface) even though the normal component may not be. As a consequence, the Nédélec element is constructed in such a way that (i) it is vector-valued, (ii) the shape functions are discontinuous, but (iii) the tangential component(s) of the vector field represented by each shape function are continuous across the faces of cells.

Other properties of the Nédélec element are that (i) it is not a primitive element; (ii) the shape functions are defined so that certain integrals over the faces are either zero or one, rather than the common case of certain point values being either zero or one.

We follow the commonly used – though confusing – definition of the "degree" of Nédélec elements. Specifically, the "degree" of the element denotes the polynomial degree of the largest complete polynomial subspace contained in the finite element space, even if the space may contain shape functions of higher polynomial degree. The lowest order element is consequently FE_Nedelec(0), i.e., the Nédélec element "of degree zero", even though the functions of this space are in general polynomials of degree one in each variable. This choice of "degree" implies that the approximation order of the function itself is degree+1, as with usual polynomial spaces. The numbering so chosen implies the sequence

\[
   Q_{k+1}
   \stackrel{\text{grad}}{\rightarrow}
   \text{Nedelec}_k
   \stackrel{\text{curl}}{\rightarrow}
   \text{RaviartThomas}_k
   \stackrel{\text{div}}{\rightarrow}
   DGQ_{k}
\]

      Note that this follows the convention of Brezzi and Raviart, though not the one used in the original paper by Nédélec.

      This class is not implemented for the codimension one case (spacedim != dim).
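As a usage sketch of the degree convention just described (the element choice is illustrative):

#include <deal.II/fe/fe_nedelec.h>

#include <iostream>

int main()
{
  using namespace dealii;

  // Lowest-order Nedelec element in 3d: one DoF per edge (12 on a hex),
  // tangentially continuous across faces, suitable for H(curl) problems
  // such as the curl-curl formulation of the Maxwell equations.
  FE_Nedelec<3> fe(0);
  std::cout << fe.get_name() << ": " << fe.n_dofs_per_cell()
            << " DoFs per cell, " << fe.n_components() << " components\n";
}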

      @@ -1405,11 +1405,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -4104,7 +4104,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -4206,7 +4206,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -4467,8 +4467,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html 2024-12-27 18:24:59.784811799 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html 2024-12-27 18:24:59.784811799 +0000 @@ -2985,7 +2985,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3087,7 +3087,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3377,8 +3377,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3412,11 +3412,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.
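As a usage sketch for FiniteElement::convert_generalized_support_point_values_to_dof_values() (the choice of element, and of Functions::CosineFunction as the function to interpolate, is arbitrary):

#include <deal.II/base/function_lib.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/lac/vector.h>

#include <vector>

using namespace dealii;

int main()
{
  const FE_Q<3> fe(2);

  // Evaluate a function f at the generalized support points \hat x_i ...
  const std::vector<Point<3>> &points = fe.get_generalized_support_points();
  const Functions::CosineFunction<3> f;

  std::vector<Vector<double>> support_point_values(
    points.size(), Vector<double>(fe.n_components()));
  for (unsigned int i = 0; i < points.size(); ++i)
    support_point_values[i](0) = f.value(points[i]);

  // ... and apply the node functionals: dof_values[i] = Psi_i[f], the
  // expansion coefficients of the interpolant f_h.
  std::vector<double> dof_values(fe.n_dofs_per_cell());
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, dof_values);
}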

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html 2024-12-27 18:24:59.828812101 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html 2024-12-27 18:24:59.832812128 +0000 @@ -160,9 +160,9 @@ class FE_NedelecSZ< dim, spacedim >::InternalData

      Derived Internal data which is used to store cell-independent data. Note that due to the nature of this element, a number of useful pre-computed quantities are stored for the computation of cell-dependent shape functions.

      The main quantities which are stored are associated with edge and face parameterizations. These are:

      • $\lambda_{i}$ - trilinear function, equal to one at the $i$-th vertex and zero at all other vertices.
      • $\sigma_{i}$ - linear functional associated with the $i$-th vertex.

      The definitions of these functionals, as well as the edge and face parameterizations and edge and face extension parameters, can be found on page 82 of Zaglmayr's thesis. The details of the definition of the globally-defined edge and face orientations can be found on page 67.

      @@ -295,9 +295,9 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Storage for all possible edge parameterizations between vertices. These are required in the computation of edge- and face-based DoFs, which are cell-dependent.

The edge parameterization of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\sigma_{E} = \sigma_{i} - \sigma_{j}$.

sigma_imj_values[q][i][j] stores the value of the edge parameterization connecting vertices $i$ and $j$ at the $q$-th quadrature point.

Note that not all of the $i$ and $j$ combinations result in valid edges on the hexahedral cell, but they are computed in this fashion for use with non-standard edge and face orientations.

      Definition at line 354 of file fe_nedelec_sz.h.
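To make the quantities concrete, here is a small self-contained sketch evaluating $\sigma_{E}$ and $\lambda_{E}$ on the reference cube. The formulas for $\lambda_i$ (trilinear vertex functions) and $\sigma_i$ below follow the standard definitions in Zaglmayr's construction and are written out here as an illustration, not taken from the library's source:

#include <array>
#include <cstdio>

// Vertices of the reference cube are labeled by their binary coordinates.
// lambda_i: trilinear function, 1 at vertex i and 0 at all other vertices.
double lambda(const unsigned v, const std::array<double, 3> &x)
{
  double p = 1.0;
  for (unsigned d = 0; d < 3; ++d)
    p *= ((v >> d) & 1) ? x[d] : 1.0 - x[d];
  return p;
}

// sigma_i: the linear function associated with vertex i (assumed form).
double sigma(const unsigned v, const std::array<double, 3> &x)
{
  double s = 0.0;
  for (unsigned d = 0; d < 3; ++d)
    s += ((v >> d) & 1) ? x[d] : 1.0 - x[d];
  return s;
}

int main()
{
  // A point on the edge E from vertex i=0 to vertex j=1 (the x-axis edge):
  const std::array<double, 3> x = {{0.25, 0.0, 0.0}};

  // Edge parameterization sigma_E = sigma_i - sigma_j (here: 1 - 2 x[0]) ...
  const double sigma_E = sigma(0, x) - sigma(1, x);

  // ... and edge extension parameter lambda_E = lambda_i + lambda_j,
  // which does not change with the orientation of the edge.
  const double lambda_E = lambda(0, x) + lambda(1, x);

  std::printf("sigma_E = %g, lambda_E = %g\n", sigma_E, lambda_E);
}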

      @@ -317,8 +317,8 @@

      Storage for gradients of all possible edge parameterizations between vertices. These are required in the computation of edge- and face-based DoFs, which are cell-dependent. Note that the components of the gradient are constant.

The edge parameterization of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\sigma_{E} = \sigma_{i} - \sigma_{j}$.

sigma_imj_grads[i][j][d] stores the gradient of the edge parameterization connecting vertices $i$ and $j$ in component $d$.

      Note that the gradient of the edge parameterization is constant on an edge, so we do not need to store it at every quadrature point.

      Definition at line 371 of file fe_nedelec_sz.h.

      @@ -381,10 +381,10 @@

      Storage for edge extension parameters at quadrature points. These are stored for the 12 edges such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

The edge extension parameter of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\lambda_{E} = \lambda_{i} + \lambda_{j}$.

Note that under this definition, the values of $\lambda_{E}$ do not change with the orientation of the edge.

edge_lambda_values[m][q] stores the edge extension parameter value at the $q$-th quadrature point on edge $m$.

      Definition at line 414 of file fe_nedelec_sz.h.

      @@ -404,7 +404,7 @@

Storage for gradients of edge extension parameters in 2d. In this case they are constant. These are stored for the 12 edges such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

edge_lambda_grads_2d[m][d] stores the gradient of the edge extension parameter for component $d$ on edge $m$.

      Definition at line 425 of file fe_nedelec_sz.h.

      @@ -424,7 +424,7 @@

Storage for gradients of edge extension parameters in 3d. In this case they are non-constant. These are stored for the 12 edges such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

edge_lambda_grads_3d[m][q][d] stores the gradient of the edge extension parameter for component $d$ at the $q$-th quadrature point on edge $m$.

      Definition at line 436 of file fe_nedelec_sz.h.

      @@ -444,7 +444,7 @@

Storage for 2nd derivatives of edge extension parameters in 3d, which are constant across the cell. These are stored for the 12 edges such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

edge_lambda_gradgrads_3d[m][d1][d2] stores the 2nd derivatives of the edge extension parameters with respect to components d1 and d2 on edge $m$.

      Definition at line 448 of file fe_nedelec_sz.h.

      @@ -464,10 +464,10 @@

      Storage for the face extension parameters. These are stored for the 6 faces such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

The face extension parameter of a face, F, defined by the vertices v1, v2, v3, v4 is given by $\lambda_{F} = \lambda_{v1} + \lambda_{v2} + \lambda_{v3} + \lambda_{v4}$.

Note that under this definition, the values of $\lambda_{F}$ do not change with the orientation of the face.

face_lambda_values[m][q] stores the face extension parameter value at the $q$-th quadrature point on face $m$.

      Definition at line 466 of file fe_nedelec_sz.h.

      @@ -487,7 +487,7 @@

      Storage for gradients of face extension parameters. These are stored for the 6 faces such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

face_lambda_grads[m][d] stores the gradient of the face extension parameters for component $d$ on face $m$.

      Definition at line 476 of file fe_nedelec_sz.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html 2024-12-27 18:24:59.968813062 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html 2024-12-27 18:24:59.976813117 +0000 @@ -482,7 +482,7 @@ class FE_Nothing< dim, spacedim >

      Definition of a finite element space with zero degrees of freedom and that, consequently, can only represent a single function: the zero function.

      This class is useful (in the context of an hp-method) to represent empty cells in the triangulation on which no degrees of freedom should be allocated, or to describe a field that is extended by zero to a part of the domain where we don't need it. Thus a triangulation may be divided into two regions: an active region where normal elements are used, and an inactive region where FE_Nothing elements are used. The DoFHandler will therefore assign no degrees of freedom to the FE_Nothing cells, and this subregion is therefore implicitly deleted from the computation. step-10 and step-46 show use cases for this element. An interesting application for this element is also presented in the paper [Cangiani2012].

      FE_Nothing as seen as a function space

Finite elements are often best interpreted as forming a function space, i.e., a set of functions that form a vector space. One can indeed interpret FE_Nothing in this light: It corresponds to the function space $V_h=\{0\}$, i.e., the set of functions that are zero everywhere. (The constructor can take an argument that, if greater than one, extends the space to one of vector-valued functions with more than one component, with all components equal to zero everywhere.) Indeed, this is a vector space since every linear combination of elements in the vector space is also an element in the vector space, as is every multiple of the single element zero. It is obvious that the function space has no degrees of freedom, thus the name of the class.

      FE_Nothing in combination with other elements

      In situations such as those of step-46, one uses FE_Nothing on cells where one is not interested in a solution variable. For example, in fluid structure interaction problems, the fluid velocity is only defined on cells inside the fluid part of the domain. One then uses FE_Nothing on cells in the solid part of the domain to describe the finite element space for the velocity. In other words, the velocity lives everywhere conceptually, but it is identically zero in those parts of the domain where it is not of interest and doesn't use up any degrees of freedom there.

      The question is what happens at the interface between areas where one is interested in the solution (and uses a "normal" finite element) and where one is not interested (and uses FE_Nothing): Should the solution at that interface be zero – i.e., we consider a "continuous" finite element field that happens to be zero in that area where FE_Nothing is used – or is there no requirement for continuity at the interface. In the deal.II language, this is encoded by what the function FiniteElement::compare_for_domination() returns: If the FE_Nothing "dominates", then the solution must be zero at the interface; if it does not, then there is no requirement and one can think of FE_Nothing as a function space that is in general discontinuous (i.e., there is no requirement for any kind of continuity at cell interfaces) but on every cell equal to zero.
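A minimal sketch of this usage pattern with the hp machinery (the geometric predicate that splits the domain into an "active" and an "inactive" region is made up for the illustration):

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_nothing.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>
#include <deal.II/hp/fe_collection.h>

using namespace dealii;

int main()
{
  Triangulation<2> triangulation;
  GridGenerator::hyper_cube(triangulation);
  triangulation.refine_global(3);

  // Index 0: a "normal" element; index 1: an element with no DoFs at all.
  hp::FECollection<2> fe_collection;
  fe_collection.push_back(FE_Q<2>(1));
  fe_collection.push_back(FE_Nothing<2>());

  DoFHandler<2> dof_handler(triangulation);

  // Use FE_Nothing on the right half of the domain (an arbitrary predicate
  // for this illustration); no degrees of freedom are allocated there.
  for (const auto &cell : dof_handler.active_cell_iterators())
    cell->set_active_fe_index(cell->center()[0] > 0.5 ? 1 : 0);

  dof_handler.distribute_dofs(fe_collection);
}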

      @@ -2888,7 +2888,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -2990,7 +2990,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3251,8 +3251,8 @@
component_mask: The mask that selects individual components of the finite element
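A short sketch of the ComponentMask-to-BlockMask conversion (the Stokes-like element below is an arbitrary choice for illustration):

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/component_mask.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

using namespace dealii;

int main()
{
  // Two velocity components plus one pressure component.
  const FESystem<2> fe(FE_Q<2>(2), 2, FE_Q<2>(1), 1);

  // Select all velocity components ...
  const FEValuesExtractors::Vector velocities(0);
  const ComponentMask component_mask = fe.component_mask(velocities);

  // ... and convert to a block mask. This succeeds because the selected
  // components make up complete blocks of this element. (For an element
  // like FE_RaviartThomas, a mask for a single vector component would not
  // correspond to complete blocks, and the function would throw.)
  const BlockMask block_mask = fe.block_mask(component_mask);
}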

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3286,11 +3286,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html 2024-12-27 18:25:00.112814051 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html 2024-12-27 18:25:00.120814106 +0000 @@ -489,13 +489,13 @@

      Detailed Description

      Implementation of the scalar version of the P1 nonconforming finite element, a piecewise linear element on quadrilaterals in 2d. This implementation is only for 2d cells in a 2d space (i.e., codimension 0).

Unlike the usual continuous, $H^1$ conforming finite elements, the P1 nonconforming element does not enforce continuity across edges. However, it requires continuity in an integral sense: any function in the space should have the same integral values on the two sides of the common edge shared by two adjacent elements.

Thus, each function in the nonconforming element space can be discontinuous, and consequently not included in $H^1_0$, just like the basis functions in Discontinuous Galerkin (DG) finite element spaces. On the other hand, basis functions in DG spaces are completely discontinuous across edges without any relation between the values from both sides. This is a reason why usual weak formulations for DG schemes contain additional penalty terms for jumps across edges to control discontinuity. However, nonconforming elements usually do not need additional terms in their weak formulations because their integrals along edges are the same from both sides, i.e., there is some level of continuity.

      Dice Rule

Since any function in the P1 nonconforming space is piecewise linear on each element, the function value at the midpoint of each edge is the same as the mean value on the edge. Thus the continuity of the integral value across each edge is equivalent to the continuity of the midpoint value of each edge in this case.

Thus for the P1 nonconforming element, the function values at midpoints on the edges of a cell are important. The first attempt to define (local) degrees of freedom (DoFs) on a quadrilateral is to use the midpoint values of a function.

However, these 4 functionals are not linearly independent because a linear function in 2d is uniquely determined by only 3 independent values. A simple observation is that any linear function on a quadrilateral satisfies the 'dice rule': the sum of the two function values at the midpoints of one pair of opposite edges is equal to the sum of those at the midpoints of the other pair. This is called the 'dice rule' because the numbers of points on opposite sides of a die always add up to the same number as well (in the case of dice, to seven).

In formulas, the dice rule is written as $\phi(m_0) + \phi(m_1) = \phi(m_2) + \phi(m_3)$ for all $\phi$ in the function space, where $m_j$ is the midpoint of the edge $e_j$. Here, we assume the standard numbering convention for edges used in deal.II and described in class GeometryInfo.

Conversely, if 4 values at the midpoints satisfying the dice rule are given, then there always exists a unique linear function which coincides with these 4 midpoint values.

Due to the dice rule, the values at any three midpoints determine the value at the remaining midpoint. This means that the number of independent local functionals on a cell is 3, which is also the dimension of the linear polynomial space on a cell in 2d.
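The dice rule is easy to verify numerically on the reference cell; in the following sketch the midpoints $m_0,\dots,m_3$ follow the deal.II edge numbering (left, right, bottom, top), and the linear function is arbitrary:

#include <array>
#include <cassert>

int main()
{
  // An arbitrary linear function phi(x, y) = a x + b y + c.
  const double a = 2.0, b = -3.0, c = 0.5;
  const auto phi = [&](double x, double y) { return a * x + b * y + c; };

  // Edge midpoints of the unit cell in deal.II numbering:
  // e0: left, e1: right, e2: bottom, e3: top.
  const std::array<std::array<double, 2>, 4> m = {
    {{{0.0, 0.5}}, {{1.0, 0.5}}, {{0.5, 0.0}}, {{0.5, 1.0}}}};

  // Dice rule: phi(m0) + phi(m1) == phi(m2) + phi(m3).
  assert(phi(m[0][0], m[0][1]) + phi(m[1][0], m[1][1]) ==
         phi(m[2][0], m[2][1]) + phi(m[3][0], m[3][1]));
}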

      Shape functions

@@ -511,11 +511,11 @@ (fragment of an ASCII diagram of the reference cell, showing vertices 0 and 1)

For each vertex $v_j$ of a given cell, there are two edges of which $v_j$ is one of the end points. Consider a linear function such that it has value 0.5 at the midpoints of the two adjacent edges, and 0.0 at the two midpoints of the other edges. Note that the set of these values satisfies the dice rule which is described above. We denote such a function associated with vertex $v_j$ by $\phi_j$. Then the set of 4 shape functions is a partition of unity on a cell: $\sum_{j=0}^{3} \phi_j = 1$. (This is easy to see: at each edge midpoint, the four functions add up to one because two of them have value 0.5 and the other two value 0.0. Because the function is globally linear, the only function that can have value 1 at four points must also be globally equal to one.)

The following figures represent $\phi_j$ for $j=0,\cdots,3$ with their midpoint values:

      • shape function $\phi_0$: (ASCII cell diagram, truncated in the diff; midpoint value 0.0 on the top edge)

      • shape function $\phi_1$: (ASCII cell diagram, truncated in the diff; midpoint value 0.0 on the top edge)

      • shape function $\phi_2$: (ASCII cell diagram, truncated in the diff; midpoint value 0.5 on the top edge)

      • shape function $\phi_3$: (ASCII cell diagram, truncated in the diff; midpoint value 0.5 on the top edge)
        @@ -887,8 +887,8 @@
           
         
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
Return the coefficients of the 4 local linear shape functions $\phi_j(x,y) = a x + b y + c$ on a given cell. For each local shape function, the array consists of three coefficients, in the order a, b, and c.

      Definition at line 88 of file fe_p1nc.cc.
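On the reference cell, these coefficients can be worked out by hand. The following sketch checks the closed form for $\phi_0$ (value 0.5 at the midpoints of the two edges adjacent to vertex 0, value 0.0 at the other two); it is an illustration on the unit cell, not the library's implementation, which works on the actual cell geometry:

#include <cassert>
#include <cmath>

int main()
{
  // On the unit cell, phi_0(x, y) = a x + b y + c with
  // a = b = -0.5, c = 0.75 reproduces the prescribed midpoint values.
  const double a = -0.5, b = -0.5, c = 0.75;
  const auto phi0 = [&](double x, double y) { return a * x + b * y + c; };

  // Midpoints of the two edges adjacent to vertex 0 = (0,0): value 0.5 ...
  assert(std::abs(phi0(0.0, 0.5) - 0.5) < 1e-14); // left edge
  assert(std::abs(phi0(0.5, 0.0) - 0.5) < 1e-14); // bottom edge
  // ... and of the two opposite edges: value 0.0.
  assert(std::abs(phi0(1.0, 0.5)) < 1e-14); // right edge
  assert(std::abs(phi0(0.5, 1.0)) < 1e-14); // top edge
}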

      @@ -2952,7 +2952,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3054,7 +3054,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3338,8 +3338,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3373,11 +3373,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html 2024-12-27 18:25:00.256815040 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html 2024-12-27 18:25:00.260815067 +0000 @@ -1420,17 +1420,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
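The correction itself is a plain tensor contraction. The following sketch spells it out for a single shape function with simple arrays (the array layout is chosen for the illustration; inside the library this happens with mapping data in FE_Poly):

#include <array>

constexpr int dim = 3;

using Matrix  = std::array<std::array<double, dim>, dim>;
using Tensor3 = std::array<Matrix, dim>;

// Corrected Hessian of one shape function:
//   d^2 phi / (dx_j dx_k) = D_{jk} - H_{mjk} * dphi/dx_m,
// where D is the uncorrected Hessian and H the Jacobian
// pushed-forward derivative.
Matrix correct_hessian(const Matrix                  &D,
                       const Tensor3                 &H,
                       const std::array<double, dim> &grad_phi)
{
  Matrix result = D;
  for (int j = 0; j < dim; ++j)
    for (int k = 0; k < dim; ++k)
      for (int m = 0; m < dim; ++m)
        result[j][k] -= H[m][j][k] * grad_phi[m];
  return result;
}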

      @@ -1465,21 +1465,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3013,7 +3013,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3115,7 +3115,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3405,8 +3405,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
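A minimal sketch of how one might query this classification, using the documented FiniteElement::get_associated_geometry_primitive() and GeometryPrimitive::get_dimension() interfaces; the element choice and the printing are illustrative only:

  #include <deal.II/base/geometry_info.h>
  #include <deal.II/fe/fe_q.h>
  #include <iostream>

  int main()
  {
    using namespace dealii;
    const FE_Q<3> fe(2); // a Q_2 element in 3d, as in the example above
    for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
      {
        // dimension 0 = vertex, 1 = line, 2 = quad, 3 = hex
        const GeometryPrimitive p = fe.get_associated_geometry_primitive(i);
        std::cout << "dof " << i << " is associated with a primitive of dimension "
                  << p.get_dimension() << '\n';
      }
  }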

      Parameters
      @@ -3440,11 +3440,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.
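As a usage sketch under stated assumptions: the element fe and the Function object fn are placeholders, and the member functions named here are the ones referenced in the text above:

  #include <deal.II/base/function.h>
  #include <deal.II/base/point.h>
  #include <deal.II/fe/fe.h>
  #include <deal.II/lac/vector.h>
  #include <vector>

  template <int dim>
  std::vector<double>
  interpolate_to_dof_values(const dealii::FiniteElement<dim> &fe,
                            const dealii::Function<dim>      &fn)
  {
    using namespace dealii;
    // Evaluate fn at the generalized support points \hat{x}_i ...
    const std::vector<Point<dim>> &points = fe.get_generalized_support_points();
    std::vector<Vector<double>> support_point_values(
      points.size(), Vector<double>(fe.n_components()));
    for (unsigned int i = 0; i < points.size(); ++i)
      fn.vector_value(points[i], support_point_values[i]);

    // ... and apply the node functionals \Psi_i to obtain dof values.
    std::vector<double> dof_values(fe.n_dofs_per_cell());
    fe.convert_generalized_support_point_values_to_dof_values(
      support_point_values, dof_values);
    return dof_values;
  }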

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html 2024-12-27 18:25:00.404816056 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html 2024-12-27 18:25:00.412816111 +0000 @@ -2973,7 +2973,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, of which exactly the one element is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3081,7 +3081,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3391,8 +3391,8 @@
component_mask: The mask that selects individual components of the finite element
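A minimal sketch of both conversions just described, using a mixed Raviart-Thomas/DGQ element in the spirit of the $x$-velocity example; the particular element and extractor indices are illustrative assumptions:

  #include <deal.II/fe/fe_dgq.h>
  #include <deal.II/fe/fe_raviart_thomas.h>
  #include <deal.II/fe/fe_system.h>
  #include <deal.II/fe/fe_values_extractors.h>

  void mask_demo()
  {
    using namespace dealii;
    // Two blocks: a vector-valued Raviart-Thomas velocity (components 0 and 1)
    // and a scalar DGQ pressure (component 2).
    const FESystem<2> fe(FE_RaviartThomas<2>(1), 1, FE_DGQ<2>(1), 1);

    // A scalar extractor for the pressure selects a complete block:
    const BlockMask pressure_block =
      fe.block_mask(FEValuesExtractors::Scalar(2));

    // A component mask for the x velocity alone covers only part of the
    // Raviart-Thomas block, so converting it to a block mask would throw:
    const ComponentMask x_velocity =
      fe.component_mask(FEValuesExtractors::Scalar(0));
    // fe.block_mask(x_velocity); // would produce an exception
  }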

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3428,11 +3428,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html 2024-12-27 18:25:00.560817128 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html 2024-12-27 18:25:00.556817100 +0000 @@ -509,12 +509,12 @@

Similarly, in many cases, node functionals depend on the shape of the mesh cell, since they evaluate normal or tangential components on the faces. In order to allow for a set of transformations, the variable mapping_kind has been introduced. It needs to be set in the constructor of a derived class.

Any derived class must decide on the polynomial space to use. This polynomial space should be implemented simply as a set of vector valued polynomials like PolynomialsBDM and PolynomialsRaviartThomas. In order to facilitate this implementation, the basis chosen by the polynomial space is of no importance to the current class – as described next, this class internally handles the transformation from the basis chosen by the polynomial space template argument to the basis we want to use for finite element computations.

      Determining the correct basis

In most cases, the basis used by the class that describes the polynomial space, $\{\tilde\varphi_j(\hat{\mathbf x})\}$, does not match the one we want to use for the finite element description, $\{\varphi_j(\hat{\mathbf x})\}$. Rather, we need to express the finite element shape functions as a linear combination of the basis provided by the polynomial space:

\begin{align*}
  \varphi_j = \sum_k c_{jk} \tilde\varphi_k.
\end{align*}

These expansion coefficients $c_{jk}$ are typically computed in the constructors of derived classes. To facilitate this, this class at first (unless told otherwise, see below) assumes that the shape functions should be exactly the ones provided by the polynomial space. In the constructor of the derived class, one then typically has code of the form shown in the sketch below.
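The diff retains only the leading comments of this snippet, plus the tooltip signatures of FullMatrix::invert() and FETools::compute_node_matrix(); the executable lines are reconstructed here as a sketch from the surrounding description (the temporary matrix M, the call to FETools::compute_node_matrix(), and inverse_node_matrix are all referenced in the following paragraphs; the dof count accessor is an assumption):

  // Now compute the inverse node matrix, generating the correct
  // basis functions from the raw ones. For a discussion of what
  // exactly happens here, see FETools::compute_node_matrix.
  const FullMatrix<double> M = FETools::compute_node_matrix(*this);
  this->inverse_node_matrix.reinit(this->n_dofs_per_cell(),
                                   this->n_dofs_per_cell());
  this->inverse_node_matrix.invert(M);
  // From this point on, the matrix is non-empty, which signals to this
  // class that the expanded basis, not the raw one, should be used.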
      The FETools::compute_node_matrix() function explains in more detail what exactly it computes, and how; in any case, the result is that inverse_node_matrix now contains the expansion coefficients $c_{jk}$, and the fact that this block of code now sets the matrix to a non-zero size indicates to the functions of the current class that it should from then on use the expanded basis, $\{\varphi_j(\hat{\mathbf x})\}$, and no longer the original, "raw" basis $\{\tilde\varphi_j(\hat{\mathbf x})\}$ when asked for values or derivatives of shape functions.

      In order for this scheme to work, it is important to ensure that the size of the inverse_node_matrix be zero at the time when FETools::compute_node_matrix() is called; thus, the call to this function cannot be inlined into the last line – the result of the call really does need to be stored in the temporary object M.
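In index notation, the computation just described amounts to the following (a sketch inferred from the surrounding paragraphs, not a verbatim part of the original page):

\[
  M_{ik} = \Psi_i[\tilde\varphi_k], \qquad
  \Psi_i[\varphi_j] = \sum_k c_{jk}\,\Psi_i[\tilde\varphi_k]
                    = \sum_k c_{jk} M_{ik} \overset{!}{=} \delta_{ij},
\]

so the coefficient matrix $(c_{jk})$ is, up to transposition, the inverse of $M$ – which is exactly what inverse_node_matrix stores.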

      Setting the transformation

      In most cases, vector valued basis functions must be transformed when mapped from the reference cell to the actual grid cell. These transformations can be selected from the set MappingKind and stored in mapping_kind. Therefore, each constructor should contain a line like:
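The example line itself is elided by the diff; in deal.II's derived tensor-valued elements it takes a form like the following sketch, where the particular MappingKind value (here mapping_raviart_thomas, an assumption for illustration) depends on the element:

  this->mapping_kind = {mapping_raviart_thomas};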

      @@ -2912,7 +2912,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, of which exactly the one element is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3014,7 +3014,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3304,8 +3304,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3339,11 +3339,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html 2024-12-27 18:25:00.704818117 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html 2024-12-27 18:25:00.704818117 +0000 @@ -714,11 +714,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
      @@ -1735,17 +1735,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
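For context (a sketch inferred from the formulas above, not part of the original page): the $H$ term arises because the chain rule, applied twice to $\phi_i(\mathbf x) = \hat\phi_i(\hat{\mathbf x}(\mathbf x))$, produces a second summand beyond $D_{ijk}$:

\[
 \frac{d^2 \phi_i}{d x_j d x_k}
 = \frac{d^2 \hat\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1}
 + \frac{d \hat\phi_i}{d \hat x_J} \frac{d (J_{jJ})^{-1}}{d x_k},
\]

and it is this second summand that is rewritten as $-H_{mjk} \frac{d \phi_i}{d x_m}$ above.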

      @@ -1778,21 +1778,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3268,7 +3268,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, of which exactly the one element is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3370,7 +3370,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3660,8 +3660,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html 2024-12-27 18:25:00.856819160 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html 2024-12-27 18:25:00.852819133 +0000 @@ -851,11 +851,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -1872,17 +1872,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -1915,21 +1915,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]


      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]


      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.


      @@ -3292,7 +3292,7 @@
[in] support_point_values : An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
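
A minimal sketch of the successful case, using a hypothetical mixed element in which every scalar FE_Q copy forms its own block (so the conversion is well defined):

  #include <deal.II/fe/block_mask.h>
  #include <deal.II/fe/fe_q.h>
  #include <deal.II/fe/fe_system.h>
  #include <deal.II/fe/fe_values_extractors.h>

  void block_mask_example()
  {
    // Two velocity components plus one pressure component; three blocks.
    dealii::FESystem<2> fe(dealii::FE_Q<2>(2), 2, dealii::FE_Q<2>(1), 1);

    const dealii::FEValuesExtractors::Scalar pressure(2);
    const dealii::BlockMask mask = fe.block_mask(pressure);
    // 'mask' now selects exactly the last of the three blocks. With an
    // FE_RaviartThomas velocity instead, extracting a single velocity
    // component would fail with an exception, as noted above.
  }
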
      Parameters
      @@ -3394,7 +3394,7 @@
scalar : An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
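
Continuing the sketch from above, the ComponentMask-to-BlockMask conversion might look like this (same hypothetical element, so components 0 and 1 together cover the first two blocks completely):

  // Select both velocity components of the element above ...
  const dealii::FEValuesExtractors::Vector velocities(0);
  const dealii::ComponentMask component_mask = fe.component_mask(velocities);

  // ... and convert to a block mask; this succeeds because the selected
  // components encompass complete blocks.
  const dealii::BlockMask block_mask = fe.block_mask(component_mask);
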
      Parameters
      @@ -3684,8 +3684,8 @@
component_mask : The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
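
A sketch of querying this classification for every degree of freedom of a $Q_2$ element in 3d; the integer conversion of the returned GeometryPrimitive for printing is an assumption of this sketch:

  #include <deal.II/fe/fe_q.h>

  #include <iostream>

  void print_dof_association()
  {
    dealii::FE_Q<3> fe(2); // Q2 in 3d: DoFs on vertices, edge midpoints,
                           // face centers, and the cell center
    for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
      std::cout << "DoF " << i << " is associated with an object of dimension "
                << static_cast<unsigned int>(
                     fe.get_associated_geometry_primitive(i))
                << '\n'; // 0: vertex, 1: line, 2: quad, 3: hex
  }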

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html 2024-12-27 18:25:00.996820122 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html 2024-12-27 18:25:01.000820149 +0000 @@ -658,11 +658,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -1685,17 +1685,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
\]


      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]


      where $H_{ijk}$ is the Jacobian pushed-forward derivative.


      @@ -1728,21 +1728,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]


      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]


      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.


      @@ -3276,7 +3276,7 @@
[in] support_point_values : An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3378,7 +3378,7 @@
scalar : An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3668,8 +3668,8 @@
component_mask : The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html 2024-12-27 18:25:01.152821193 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html 2024-12-27 18:25:01.156821220 +0000 @@ -508,7 +508,7 @@

      The constructor creates a TensorProductPolynomials object that includes the tensor product of LagrangeEquidistant polynomials of degree p. This TensorProductPolynomials object provides all values and derivatives of the shape functions. In case a quadrature rule is given, the constructor creates a TensorProductPolynomials object that includes the tensor product of Lagrange polynomials with the support points from points.

      Furthermore the constructor fills the interface_constraints, the prolongation (embedding) and the restriction matrices. These are implemented only up to a certain degree and may not be available for very high polynomial degree.

      Unit support point distribution and conditioning of interpolation

When constructing an FE_Q element at polynomial degrees one or two, equidistant support points at 0 and 1 (linear case) or 0, 0.5, and 1 (quadratic case) are used. The unit support or nodal points $x_i$ are those points where the $j$th Lagrange polynomial satisfies the $\delta_{ij}$ property, i.e., where one polynomial is one and all the others are zero. For higher polynomial degrees, the support points are non-equidistant by default, and chosen to be the support points of the (degree+1)-order Gauss-Lobatto quadrature rule. This point distribution yields well-conditioned Lagrange interpolation at arbitrary polynomial degrees. By contrast, polynomials based on equidistant points get increasingly ill-conditioned as the polynomial degree increases. In interpolation, this effect is known as the Runge phenomenon. For Galerkin methods, the Runge phenomenon is typically not visible in the solution quality but rather in the condition number of the associated system matrices. For example, the elemental mass matrix of equidistant points at degree 10 has condition number 2.6e6, whereas the condition number for Gauss-Lobatto points is around 400.
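
For illustration, both point distributions can be requested explicitly; the equidistant variant uses the QIterated/QTrapezoid construction accepted by the FE_Q constructor:

  #include <deal.II/base/quadrature_lib.h>
  #include <deal.II/fe/fe_q.h>

  void construct_fe_q_variants()
  {
    // Default: Gauss-Lobatto support points, well conditioned at high degree.
    dealii::FE_Q<2> fe_gauss_lobatto(10);

    // Explicit equidistant support points, ill conditioned at degree 10.
    dealii::FE_Q<2> fe_equidistant(
      dealii::QIterated<1>(dealii::QTrapezoid<1>(), 10));
  }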

The Gauss-Lobatto points in 1d include the end points 0 and 1 of the unit interval. The interior points are shifted towards the end points, which gives a denser point distribution close to the element boundary.

      If combined with Gauss-Lobatto quadrature, FE_Q based on the default support points gives diagonal mass matrices. This case is demonstrated in step-48. However, this element can be combined with arbitrary quadrature rules through the usual FEValues approach, including full Gauss quadrature. In the general case, the mass matrix is non-diagonal.
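
A sketch of the collocation pairing mentioned above, a degree-4 element with a (4+1)-point Gauss-Lobatto rule as in step-48:

  #include <deal.II/base/quadrature_lib.h>
  #include <deal.II/fe/fe_q.h>
  #include <deal.II/fe/fe_values.h>

  void collocated_setup()
  {
    dealii::FE_Q<2>          fe(4);
    dealii::QGaussLobatto<2> quadrature(fe.degree + 1); // nodes == quad points

    // A mass matrix assembled with these FEValues comes out diagonal, since
    // each shape function is nonzero at exactly one quadrature point.
    dealii::FEValues<2> fe_values(fe, quadrature,
                                  dealii::update_values |
                                    dealii::update_JxW_values);
  }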

      Numbering of the degrees of freedom (DoFs)

@@ -694,9 +694,9 @@ ... @@ -751,7 +751,7 @@

[Plots of the $Q_2$ element shape functions 0 through 8; the old and new pages differ only in the referenced image files, the captions are unchanged.]

@@ -920,9 +920,9 @@ ... @@ -1070,9 +1070,9 @@

[Plots of the $Q_4$ element shape functions 0 through 21; again only the referenced image files differ between old and new.]

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html 2024-12-27 18:25:01.296822181 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html 2024-12-27 18:25:01.300822209 +0000 @@ -2265,17 +2265,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
\]


      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]


      where $H_{ijk}$ is the Jacobian pushed-forward derivative.


      @@ -2308,21 +2308,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]


      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]


      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.


      @@ -3486,7 +3486,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3588,7 +3588,7 @@
scalar : An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3849,8 +3849,8 @@
component_mask : The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3884,11 +3884,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html 2024-12-27 18:25:01.452823253 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html 2024-12-27 18:25:01.448823226 +0000 @@ -507,17 +507,17 @@
[in] support_point_values : An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Detailed Description

      template<int dim, int spacedim = dim>
class FE_Q_Bubbles< dim, spacedim >

Implementation of a scalar Lagrange finite element $Q_p^+$ that yields the finite element space of continuous, piecewise polynomials of degree p in each coordinate direction plus some (non-normalized) bubble enrichment space spanned by the additional shape functions $\varphi_j(\mathbf x) = 2^{p-1}\left(x_j-\frac 12\right)^{p-1} \left[\prod_{i=0}^{dim-1}(x_i(1-x_i))\right]$ for $j=0,\ldots,dim-1$. If $p$ is one, the first factor disappears and one obtains the usual bubble function centered at the mid-point of the cell. Because these last shape functions have polynomial degree $p+1$, the overall polynomial degree of the shape functions in the space described by this class is $p+1$.

      This class is realized using tensor product polynomials based on equidistant or given support points, in the same way as one can provide support points to the FE_Q class's constructors.

      For more information about the spacedim template parameter check the documentation of the FiniteElement class, or the one of Triangulation.

Due to the fact that the enrichments are small almost everywhere for large $p$, the condition number for the mass and stiffness matrix quickly increases with increasing $p$. Below you see a comparison with FE_Q(QGaussLobatto(p+1)) for dim=1.

Therefore, this element should be used with care for $p>3$.
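
A short sketch of the resulting space size; for dim=2 and p=2 the element combines the $(p+1)^2 = 9$ tensor-product functions with dim additional bubbles:

  #include <deal.II/fe/fe_q_bubbles.h>

  #include <cassert>

  void bubble_element_size()
  {
    dealii::FE_Q_Bubbles<2> fe(2);          // Q_2^+ in 2d
    assert(fe.n_dofs_per_cell() == 9 + 2);  // tensor-product DoFs + bubbles
  }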

      Implementation

      The constructor creates a TensorProductPolynomials object that includes the tensor product of LagrangeEquidistant polynomials of degree p plus the bubble enrichments. This TensorProductPolynomialsBubbles object provides all values and derivatives of the shape functions. In case a quadrature rule is given, the constructor creates a TensorProductPolynomialsBubbles object that includes the tensor product of Lagrange polynomials with the support points from points and the bubble enrichments as defined above.

Furthermore the constructor fills the interface_constraints, the prolongation (embedding) and the restriction matrices.

      @@ -736,11 +736,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -2459,17 +2459,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
\]


      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]


      where $H_{ijk}$ is the Jacobian pushed-forward derivative.


      @@ -2502,21 +2502,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]


      where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]


      where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.


      @@ -3593,7 +3593,7 @@
[in] support_point_values : An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3695,7 +3695,7 @@
scalar : An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
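A minimal sketch of this conversion (our illustration; the Taylor-Hood-like FESystem layout is an assumption, not taken from the page):

#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

using namespace dealii;

void component_to_block_demo()
{
  // Two scalar velocity blocks plus one scalar pressure block
  // (three components, three blocks).
  const FESystem<2> fe(FE_Q<2>(2), 2, FE_Q<2>(1), 1);

  // Select both velocity components ...
  const ComponentMask component_mask =
    fe.component_mask(FEValuesExtractors::Vector(0));

  // ... and convert the component mask into the corresponding block mask.
  // Here the two selected components are exactly blocks 0 and 1, so the
  // conversion succeeds.
  const BlockMask block_mask = fe.block_mask(component_mask);
}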
      Parameters
      @@ -3956,8 +3956,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
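A small illustrative sketch (ours, under the assumption that FiniteElement::get_associated_geometry_primitive() is the function being documented here):

#include <deal.II/base/geometry_info.h>
#include <deal.II/fe/fe_q.h>

using namespace dealii;

void geometry_primitive_demo()
{
  // For Q2 in 3d: vertex DoFs report GeometryPrimitive::vertex,
  // edge-midpoint DoFs GeometryPrimitive::line, face-center DoFs
  // GeometryPrimitive::quad, and the cell-interior DoF
  // GeometryPrimitive::hex.
  const FE_Q<3> fe(2);
  for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
    {
      const GeometryPrimitive primitive =
        fe.get_associated_geometry_primitive(i);
      (void)primitive; // inspect the returned primitive here
    }
}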

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html 2024-12-27 18:25:01.596824242 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html 2024-12-27 18:25:01.600824270 +0000 @@ -909,11 +909,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.
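A minimal usage sketch (our illustration; my_function is a hypothetical dealii::Function with as many components as the element):

#include <deal.II/base/function.h>
#include <deal.II/fe/fe_raviart_thomas.h>
#include <deal.II/lac/vector.h>

#include <vector>

using namespace dealii;

void node_values_demo(const Function<2> &my_function) // hypothetical input
{
  const FE_RaviartThomas<2> fe(1);

  // Evaluate the given function at the generalized support points ...
  const std::vector<Point<2>> &points = fe.get_generalized_support_points();
  std::vector<Vector<double>> support_point_values(
    points.size(), Vector<double>(fe.n_components()));
  for (unsigned int q = 0; q < points.size(); ++q)
    for (unsigned int c = 0; c < fe.n_components(); ++c)
      support_point_values[q](c) = my_function.value(points[q], c);

  // ... and apply the node functionals to obtain the expansion
  // coefficients Psi_i[f] of the interpolant.
  std::vector<double> dof_values(fe.n_dofs_per_cell());
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, dof_values);
}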

      Parameters
      @@ -2636,17 +2636,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
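The origin of the subtracted term can be seen from a short chain-rule computation (our sketch, using the definitions above): differentiating $\frac{d \phi_i}{d x_j} = \frac{d \hat\phi_i}{d \hat x_J} (J_{jJ})^{-1}$ once more gives

\[
  \frac{d^2 \phi_i}{d x_j d x_k}
  = D_{ijk} + \frac{d \hat\phi_i}{d \hat x_J} \frac{d (J_{jJ})^{-1}}{d x_k},
\]

and differentiating the identity $J_{mJ} (J_{jJ})^{-1} = \delta_{mj}$ shows that the second term equals $-H_{mjk} \frac{d \phi_i}{d x_m}$, i.e. exactly the correction that is subtracted.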

      @@ -2679,21 +2679,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3770,7 +3770,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3872,7 +3872,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -4133,8 +4133,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html 2024-12-27 18:25:01.760825368 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html 2024-12-27 18:25:01.768825423 +0000 @@ -3219,17 +3219,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -3264,21 +3264,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -4658,7 +4658,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -4760,7 +4760,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -5021,8 +5021,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -5056,11 +5056,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html 2024-12-27 18:25:01.904826357 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html 2024-12-27 18:25:01.908826384 +0000 @@ -504,14 +504,14 @@
      template<int dim, int spacedim = dim>
      class FE_Q_iso_Q1< dim, spacedim >

Implementation of a scalar Lagrange finite element $Q_p$-iso-$Q_1$ that defines the finite element space of continuous, piecewise linear elements with $p$ subdivisions in each coordinate direction. It yields an element with the same number of degrees of freedom as the $Q_p$ elements, but using linear interpolation instead of a higher-order one. In other words, on every cell the shape functions are not polynomials of higher degree interpolating a set of node points, but are piecewise (bi-, tri-)linear within the cell and interpolate the same set of node points. This type of element is also called a macro element in the literature, as it can be seen as consisting of several smaller elements, namely $p^{\text{dim}}$ such sub-cells.

      The numbering of degrees of freedom is done in exactly the same way as in FE_Q of degree p. See there for a detailed description on how degrees of freedom are numbered within one element.

This element represents a Q-linear finite element space on a reduced mesh of size $h/p$. Its effect is equivalent to using FE_Q of degree one on a finer mesh by a factor $p$ if an equivalent quadrature is used. However, this element reduces the flexibility in the choice of (adaptive) mesh size by exactly this factor $p$, which typically reduces efficiency. On the other hand, comparing this element with $p$ subdivisions to the FE_Q element of degree $p$ on the same mesh shows that the convergence is typically much worse for smooth problems. In particular, $Q_p$ elements achieve interpolation orders of $h^{p+1}$ in the $L_2$ norm, whereas these elements reach only $(h/p)^2$. For these two reasons, this element is usually not very useful as a standalone. In addition, any evaluation of face terms on the boundaries within the elements becomes impossible with this element because deal.II does not have the equivalent of FEFaceValues for lower-dimensional integrals in the interior of cells.
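A small sketch of the equivalence in DoF count described above (our illustration, not code from the page):

#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_q_iso_q1.h>

using namespace dealii;

void macro_element_demo()
{
  // Same number of DoFs per cell (16 each in 2d), but fe_iso is only
  // piecewise (bi-)linear on its 3x3 sub-cells instead of bicubic.
  const FE_Q<2>        fe_q(3);
  const FE_Q_iso_Q1<2> fe_iso(3);
  // fe_q.n_dofs_per_cell() == fe_iso.n_dofs_per_cell()
}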

      Nonetheless, there are a few use cases where this element actually is useful:

1. Systems of PDEs where certain variables demand higher resolution than the others, and the additional degrees of freedom should be spent on increasing the resolution of linears instead of higher-order polynomials, and you do not want to use two different meshes for the different components. This can be the case when irregularities (shocks) appear in the solution and stabilization techniques are used that work for linears but not higher-order elements.

2. Stokes/Navier Stokes systems such as the one discussed in step-22 could be solved with Q2-iso-Q1 elements for velocities instead of $Q_2$ elements. Combined with $Q_1$ pressures they give a stable mixed element pair. However, they perform worse than the standard (Taylor-Hood $Q_2\times Q_1$) approach in most situations. (See, for example, [Boffi2011].) This combination of subdivided elements for the velocity and non-subdivided elements for the pressure is sometimes called the "Bercovier-Pironneau element" and dates back to around the same time as the Taylor-Hood element (namely, the mid-1970s). For more information, see the paper by Bercovier and Pironneau from 1979 [Bercovier1979], and for the origins of the comparable Taylor-Hood element see [Taylor73] from 1973. Both pairs are sketched in code right after this list item.
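A hypothetical construction of the two pairs just mentioned (ours, in the spirit of step-22; not code from the page):

#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_q_iso_q1.h>
#include <deal.II/fe/fe_system.h>

using namespace dealii;

void stokes_pairs_demo()
{
  // "Bercovier-Pironneau": Q2-iso-Q1 velocities with Q1 pressure ...
  const FESystem<2> bercovier_pironneau(FE_Q_iso_Q1<2>(2), 2,
                                        FE_Q<2>(1), 1);
  // ... versus the standard Taylor-Hood Q2 x Q1 pair.
  const FESystem<2> taylor_hood(FE_Q<2>(2), 2, FE_Q<2>(1), 1);
}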

3.

@@ -2406,17 +2406,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2449,21 +2449,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3540,7 +3540,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3642,7 +3642,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3903,8 +3903,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html 2024-12-27 18:25:02.052827373 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html 2024-12-27 18:25:02.060827428 +0000 @@ -510,7 +510,7 @@ class FE_RT_Bubbles< dim >

This class implements a curl-enhanced Raviart-Thomas element, conforming with the $H_{\mathrm{div}}$ space. The node functionals are defined as point values in Gauss-Lobatto points. These elements generate vector fields with normal components continuous between mesh cells. The purpose of this finite element is in localizing the interactions between degrees of freedom around the nodes when an appropriate quadrature rule is used, leading to a block-diagonal mass matrix (even with full-tensor coefficient).

The elements are defined through enrichment of classical Raviart-Thomas elements with extra curls, so that the $H_{\mathrm{div}}$ conformity is preserved, and the total number of degrees of freedom of FE_RT_Bubbles of order k is equal to the number of DoFs in dim copies of FE_Q of order k.

      Note
      Unlike Raviart-Thomas, the lowest possible order for this enhanced finite element is 1, i.e. $k \ge 1$.
The matching pressure space for FE_RT_Bubbles of order k is FE_DGQ of order k-1. With the exact integration, this pair yields $(k+1)$-st order of convergence in $L_2$-norm for a vector variable and $k$-th order in $L_2$-norm for a scalar one (same as $BDM_k \times P_{k-1}$).
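A hypothetical sketch of this pairing (ours, not code from the page):

#include <deal.II/fe/fe_dgq.h>
#include <deal.II/fe/fe_rt_bubbles.h>
#include <deal.II/fe/fe_system.h>

using namespace dealii;

void rt_bubbles_pair_demo()
{
  const unsigned int k = 2;
  // Enhanced Raviart-Thomas velocities of order k paired with the
  // matching discontinuous pressure space of order k-1.
  const FESystem<3> fe(FE_RT_Bubbles<3>(k), 1, FE_DGQ<3>(k - 1), 1);
}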

      For this enhanced Raviart-Thomas element, the node values are not cell and face moments with respect to certain polynomials, but the values in Gauss-Lobatto quadrature points. The nodal values on edges (faces in 3d) are evaluated first, according to the natural ordering of the edges (faces) of a cell. The interior degrees of freedom are evaluated last.

For an RT-Bubbles element of degree k, we choose $(k+1)^{\text{dim}-1}$ Gauss-Lobatto points on each face. These points are ordered lexicographically with respect to the orientation of the face. In the interior of the cells, the values are computed using an anisotropic Gauss-Lobatto formula for integration. The mass matrix, assembled with the use of this same quadrature rule, is block diagonal with blocks corresponding to quadrature points. See "Higher order multipoint flux mixed finite element methods on quadrilaterals and hexahedra" for more details.

      @@ -523,7 +523,7 @@

Left - $2d,\,k=3$, right - $3d,\,k=2$.
      Todo
      Implement restriction matrices
      @@ -749,11 +749,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -3348,7 +3348,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3450,7 +3450,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3740,8 +3740,8 @@
component_mask: The mask that selects individual components of the finite element
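Continuing the hypothetical three-block system sketched above (inside the same function), a component mask that selects whole blocks converts cleanly:

  // Select both velocity components (components 0 and 1):
  const ComponentMask velocity_components({true, true, false});
  const BlockMask velocity_blocks = fe.block_mask(velocity_components);
  // velocity_blocks is [true, true, false]: each selected component
  // coincides with a complete block, so the conversion succeeds.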

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., the part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, and not only the cells that share the adjacent faces; consequently, the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but whose shape function is nonzero only on that one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
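A small sketch of how this classification might be used (the counting helper is an editorial illustration; the expected counts follow from the $Q_2$ structure described above):

  #include <deal.II/fe/fe_q.h>

  using namespace dealii;

  void count_dof_primitives()
  {
    const FE_Q<3> fe(2); // 27 dofs: 8 vertices, 12 edges, 6 faces, 1 interior
    unsigned int count[4] = {0, 0, 0, 0};
    for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
      ++count[fe.get_associated_geometry_primitive(i).get_dimension()];
    // count is {8, 12, 6, 1}: vertex, line, quad, and hex dofs.
  }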
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html 2024-12-27 18:25:02.204828417 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html 2024-12-27 18:25:02.200828389 +0000 @@ -730,11 +730,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the value returned by FiniteElement::system_to_component_index(). In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. For Raviart-Thomas elements, on the other hand, $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face on which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

      Parameters
      @@ -1859,17 +1859,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
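For orientation (this derivation is an editorial addition, using the same index conventions as above), the correction term arises from differentiating the first-derivative chain rule once more:

\[
 \frac{d^2 \phi_i}{d x_j d x_k}
   = \frac{d}{d x_k} \left( \frac{d \phi_i}{d \hat x_J} (J_{jJ})^{-1} \right)
   = D_{ijk} + \frac{d \phi_i}{d \hat x_J} \frac{d (J_{jJ})^{-1}}{d x_k},
\]

and differentiating the identity $J^{-1} J = I$ shows that the last term equals $-H_{mjk} \frac{d \phi_i}{d x_m}$, which is precisely the subtracted correction. The third-derivative correction below follows the same pattern, applied once more.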

      @@ -1904,21 +1904,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3394,7 +3394,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one entry corresponding to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3496,7 +3496,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3786,8 +3786,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., the part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, and not only the cells that share the adjacent faces; consequently, the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but whose shape function is nonzero only on that one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html 2024-12-27 18:25:02.360829489 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html 2024-12-27 18:25:02.356829461 +0000 @@ -519,11 +519,11 @@

      Detailed Description

      template<int dim>
class FE_RaviartThomas< dim >

Implementation of Raviart-Thomas (RT) elements. The Raviart-Thomas space is designed to solve problems in which the solution only lives in the space $H^\text{div}=\{ {\mathbf u} \in L_2: \text{div}\, {\mathbf u} \in L_2\}$, rather than in the more commonly used space $H^1=\{ u \in L_2: \nabla u \in L_2\}$. In other words, the solution must be a vector field whose divergence is square integrable, but for which the gradient may not be square integrable. The typical application for this space (and these elements) is to the mixed formulation of the Laplace equation and related situations, see for example step-20. The defining characteristic of functions in $H^\text{div}$ is that they are in general discontinuous – but that if you draw a line in 2d (or a surface in 3d), then the normal component of the vector field must be continuous across the line (or surface) even though the tangential component may not be. As a consequence, the Raviart-Thomas element is constructed in such a way that (i) it is vector-valued, (ii) the shape functions are discontinuous, but (iii) the normal component of the vector field represented by each shape function is continuous across the faces of cells.

Other properties of the Raviart-Thomas element are that (i) it is not a primitive element; (ii) the shape functions are defined so that certain integrals over the faces are either zero or one, rather than the common case of certain point values being either zero or one. (There is, however, the FE_RaviartThomasNodal element that uses point values.)

      We follow the commonly used – though confusing – definition of the "degree" of RT elements. Specifically, the "degree" of the element denotes the polynomial degree of the largest complete polynomial subspace contained in the finite element space, even if the space may contain shape functions of higher polynomial degree. The lowest order element is consequently FE_RaviartThomas(0), i.e., the Raviart-Thomas element "of degree zero", even though the functions of this space are in general polynomials of degree one in each variable. This choice of "degree" implies that the approximation order of the function itself is degree+1, as with usual polynomial spaces. The numbering so chosen implies the sequence

\[
   Q_{k+1}
   \stackrel{\text{grad}}{\rightarrow}
   \text{Nedelec}_k
   \stackrel{\text{curl}}{\rightarrow}
   \text{RaviartThomas}_k
   \stackrel{\text{div}}{\rightarrow}
   DGQ_{k}
\]

      This class is not implemented for the codimension one case (spacedim != dim).
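A minimal sketch of the typical pairing mentioned above (following the step-20 setting; the degree choice is illustrative):

  #include <deal.II/fe/fe_dgq.h>
  #include <deal.II/fe/fe_raviart_thomas.h>
  #include <deal.II/fe/fe_system.h>

  using namespace dealii;

  // Mixed Laplace: fluxes u in H(div) paired with pressures p in L2.
  // FE_RaviartThomas(0) is the "degree zero" element described above;
  // both parts then have approximation order one.
  constexpr int dim = 2;
  const FESystem<dim> fe(FE_RaviartThomas<dim>(0), 1,
                         FE_DGQ<dim>(0), 1);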

      Interpolation

      @@ -798,11 +798,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the value returned by FiniteElement::system_to_component_index(). In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. For Raviart-Thomas elements, on the other hand, $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face on which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

      Parameters
      @@ -3463,7 +3463,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one entry corresponding to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3565,7 +3565,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3826,8 +3826,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., the part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, and not only the cells that share the adjacent faces; consequently, the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but whose shape function is nonzero only on that one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html 2024-12-27 18:25:02.516830560 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html 2024-12-27 18:25:02.516830560 +0000 @@ -811,11 +811,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the value returned by FiniteElement::system_to_component_index(). In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. For Raviart-Thomas elements, on the other hand, $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face on which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

      Parameters
      @@ -3482,7 +3482,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one entry corresponding to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3584,7 +3584,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3874,8 +3874,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex, as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., the part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, and not only the cells that share the adjacent faces; consequently, the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but whose shape function is nonzero only on that one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html 2024-12-27 18:25:02.652831494 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html 2024-12-27 18:25:02.656831521 +0000 @@ -490,7 +490,7 @@

      Detailed Description

      template<int dim, int spacedim = dim>
class FE_SimplexDGP< dim, spacedim >

Implementation of a scalar discontinuous Lagrange finite element $P_k$, sometimes denoted as $P_{-k}$, that yields the finite element space of discontinuous, piecewise polynomials of degree $k$.

      Also see Simplex support.

      Definition at line 188 of file fe_simplex_p.h.
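A short usage sketch (the mesh-generation call is one possible way to obtain a simplex mesh; names are as in recent deal.II releases):

  #include <deal.II/dofs/dof_handler.h>
  #include <deal.II/fe/fe_simplex_p.h>
  #include <deal.II/grid/grid_generator.h>
  #include <deal.II/grid/tria.h>

  using namespace dealii;

  void setup_dg_on_simplices()
  {
    Triangulation<2> tria;
    GridGenerator::subdivided_hyper_cube_with_simplices(tria, 4);

    const FE_SimplexDGP<2> fe(1); // piecewise linear, discontinuous
    DoFHandler<2>          dof_handler(tria);
    dof_handler.distribute_dofs(fe); // 3 dofs per triangle, none shared
  }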

      @@ -1050,11 +1050,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the value returned by FiniteElement::system_to_component_index(). In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. For Raviart-Thomas elements, on the other hand, $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face on which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

      Parameters
      @@ -2071,17 +2071,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2114,21 +2114,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3344,7 +3344,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one entry corresponding to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3446,7 +3446,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
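The ComponentMask-to-BlockMask conversion can be sketched in the same hypothetical setting; the two velocity components together cover complete blocks, so the conversion succeeds:

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/component_mask.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

using namespace dealii;

void component_to_block_mask_example()
{
  const FESystem<2> fe(FE_Q<2>(2), 2, FE_Q<2>(1), 1);

  // Select both velocity components ...
  const FEValuesExtractors::Vector velocities(0);
  const ComponentMask component_mask = fe.component_mask(velocities);

  // ... and convert: each selected component fills a whole block here.
  const BlockMask block_mask = fe.block_mask(component_mask); // [true, true, false]
}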
      Parameters
      @@ -3707,8 +3707,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
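A short sketch of how one might query this association for every degree of freedom of a $Q_2$ element in 3d (illustrative only; the loop body and variable names are not from the library's documentation):

#include <deal.II/base/geometry_info.h>
#include <deal.II/fe/fe_q.h>

using namespace dealii;

void geometry_primitive_example()
{
  const FE_Q<3> fe(2);
  for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
    {
      const GeometryPrimitive primitive =
        fe.get_associated_geometry_primitive(i);
      // For Q_2 in 3d: 0 = vertex, 1 = line (edge midpoints),
      // 2 = quad (face centers), 3 = hex (cell center).
      const unsigned int dim_of_object = primitive.get_dimension();
      (void)dim_of_object;
    }
}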

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html 2024-12-27 18:25:02.792832455 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html 2024-12-27 18:25:02.800832510 +0000 @@ -490,7 +490,7 @@

      Detailed Description

      template<int dim, int spacedim = dim>
class FE_SimplexP< dim, spacedim >

Implementation of a scalar Lagrange finite element $P_k$ that yields the finite element space of continuous, piecewise polynomials of degree $k$. The corresponding element on hypercube cells is FE_Q, on wedges it is FE_WedgeP, and on pyramids it is FE_PyramidP.

      Also see Simplex support.

      Definition at line 132 of file fe_simplex_p.h.
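As a brief orientation, a minimal usage sketch, assuming a triangulation made of simplex cells (here generated with GridGenerator::subdivided_hyper_cube_with_simplices):

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_simplex_p.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>

using namespace dealii;

int main()
{
  // Subdivide the unit square into triangles (simplex cells).
  Triangulation<2> tria;
  GridGenerator::subdivided_hyper_cube_with_simplices(tria, 4);

  // Continuous, piecewise quadratic Lagrange element P_2 on simplices.
  const FE_SimplexP<2> fe(2);

  DoFHandler<2> dof_handler(tria);
  dof_handler.distribute_dofs(fe);
}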

      @@ -1050,11 +1050,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.
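A minimal sketch of the call pattern just described, assuming a scalar Lagrange element (for which the generalized support points are simply the usual support points) and a hypothetical sample function f(x,y) = xy:

#include <deal.II/base/point.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/lac/vector.h>

#include <vector>

using namespace dealii;

void node_values_example()
{
  const FE_Q<2> fe(2);

  // One value vector per generalized support point; for this scalar
  // element each vector has a single component.
  const auto &points = fe.get_generalized_support_points();
  std::vector<Vector<double>> support_point_values(
    points.size(), Vector<double>(fe.n_components()));
  for (unsigned int i = 0; i < points.size(); ++i)
    support_point_values[i][0] = points[i][0] * points[i][1];

  // Apply the node functionals: nodal_values[i] = Psi_i[f].
  std::vector<double> nodal_values(fe.n_dofs_per_cell());
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, nodal_values);
}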

      Parameters
      @@ -2071,17 +2071,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2114,21 +2114,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivatives would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
- H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
- H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3344,7 +3344,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3446,7 +3446,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3707,8 +3707,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html 2024-12-27 18:25:02.936833444 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html 2024-12-27 18:25:02.944833499 +0000 @@ -957,11 +957,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -1978,17 +1978,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -2021,21 +2021,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivatives would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
- H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
- H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3336,7 +3336,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3438,7 +3438,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3699,8 +3699,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html 2024-12-27 18:25:03.084834460 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html 2024-12-27 18:25:03.084834460 +0000 @@ -916,11 +916,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points, implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -1943,17 +1943,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

      Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -1986,21 +1986,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

      Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivatives would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
- H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
- H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3359,7 +3359,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3461,7 +3461,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3722,8 +3722,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html 2024-12-27 18:25:03.228835449 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html 2024-12-27 18:25:03.236835504 +0000 @@ -3274,7 +3274,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3376,7 +3376,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3637,8 +3637,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom, (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html 2024-12-27 18:25:03.376836466 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html 2024-12-27 18:25:03.376836466 +0000 @@ -3449,7 +3449,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3551,7 +3551,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3808,8 +3808,8 @@
component_mask: The mask that selects individual components of the finite element
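A sketch of this conversion in a case where it succeeds; here the two illustrative velocity components together cover two complete blocks.

#include <deal.II/fe/component_mask.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>

#include <vector>

int main()
{
  using namespace dealii;

  const FESystem<2> fe(FE_Q<2>(2), 2, FE_Q<2>(1), 1);

  // Select the two velocity components (0 and 1), leaving the
  // pressure (component 2) unselected.
  std::vector<bool> selected(fe.n_components(), false);
  selected[0] = selected[1] = true;
  const ComponentMask velocities(selected);

  // The selected components encompass complete blocks, so the
  // conversion succeeds; it would throw for, e.g., a single vector
  // component of an FE_RaviartThomas element.
  const BlockMask velocity_blocks = fe.block_mask(velocities);
  (void)velocity_blocks;
}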

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3843,11 +3843,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.
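As a sketch of the mechanics just described; the sampled function $f(x,y)=xy$ and the scalar element are arbitrary illustrative choices.

#include <deal.II/base/point.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/lac/vector.h>

#include <vector>

int main()
{
  using namespace dealii;

  const FE_Q<2> fe(2); // scalar Lagrange element

  // Evaluate f at the generalized support points x_i ...
  const std::vector<Point<2>> &points =
    fe.get_generalized_support_points();
  std::vector<Vector<double>> support_point_values(
    points.size(), Vector<double>(fe.n_components()));
  for (unsigned int i = 0; i < points.size(); ++i)
    support_point_values[i][0] = points[i][0] * points[i][1];

  // ... and apply the node functionals Psi_i to obtain the expansion
  // coefficients of the interpolant f_h. For Lagrange elements the
  // node functionals are point evaluations, so this is the identity.
  std::vector<double> dof_values(fe.n_dofs_per_cell());
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, dof_values);
}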

      Parameters
/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html 2024-12-27 18:25:03.512837400 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html 2024-12-27 18:25:03.520837454 +0000
@@ -714,11 +714,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
      @@ -1735,17 +1735,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
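Spelled out as loops over tensor indices, the correction reads as in the sketch below; D, H, and grad are hypothetical stand-ins for the naively transformed Hessian of one shape function, the Jacobian pushed-forward derivative, and the already-mapped shape gradient, not actual library names.

// Apply D_{jk} -= H_{mjk} * dphi/dx_m for one shape function phi.
template <int dim>
void correct_hessian(double D[dim][dim],
                     const double H[dim][dim][dim],
                     const double grad[dim])
{
  for (int j = 0; j < dim; ++j)
    for (int k = 0; k < dim; ++k)
      for (int m = 0; m < dim; ++m)
        D[j][k] -= H[m][j][k] * grad[m];
}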

      @@ -1778,21 +1778,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m} - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m} - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.
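The same pattern one derivative order up, again as a sketch with hypothetical names; hess[k][m] stands for the already-corrected second derivative d^2 phi / (dx_k dx_m).

// Apply D_{jkl} -= H_{mjl} hess_{km} + H_{mkl} hess_{jm}
//               + H_{mjk} hess_{lm} + K_{mjkl} grad_m.
template <int dim>
void correct_third_derivative(double D[dim][dim][dim],
                              const double H[dim][dim][dim],
                              const double K[dim][dim][dim][dim],
                              const double hess[dim][dim],
                              const double grad[dim])
{
  for (int j = 0; j < dim; ++j)
    for (int k = 0; k < dim; ++k)
      for (int l = 0; l < dim; ++l)
        for (int m = 0; m < dim; ++m)
          D[j][k][l] -= H[m][j][l] * hess[k][m] +
                        H[m][k][l] * hess[j][m] +
                        H[m][j][k] * hess[l][m] +
                        K[m][j][k][l] * grad[m];
}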

      @@ -3268,7 +3268,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3370,7 +3370,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3660,8 +3660,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html 2024-12-27 18:25:03.664838443 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html 2024-12-27 18:25:03.668838471 +0000
@@ -851,11 +851,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -1872,17 +1872,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -1915,21 +1915,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m} - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m} - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3292,7 +3292,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3394,7 +3394,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3684,8 +3684,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html 2024-12-27 18:25:03.800839377 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html 2024-12-27 18:25:03.808839432 +0000
@@ -658,11 +658,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      @@ -1685,17 +1685,17 @@

      Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

      @@ -1728,21 +1728,21 @@

      Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m} - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m} - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

      @@ -3276,7 +3276,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3378,7 +3378,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -3668,8 +3668,8 @@
component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located at a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html 2024-12-27 18:25:03.968840531 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html 2024-12-27 18:25:03.968840531 +0000
@@ -501,7 +501,7 @@ class FiniteElement< dim, spacedim >

      This is the base class for finite elements in arbitrary dimensions. It declares the interface both in terms of member variables and public member functions through which properties of a concrete implementation of a finite element can be accessed. This interface generally consists of a number of groups of variables and functions that can roughly be delineated as follows:

      • Basic information about the finite element, such as the number of degrees of freedom per vertex, edge, or cell. This kind of data is stored in the FiniteElementData base class. (Though the FiniteElement::get_name() member function also falls into this category.)
      • A description of the shape functions and their derivatives on the reference cell $[0,1]^d$, if an element is indeed defined by mapping shape functions from the reference cell to an actual cell.
• Matrices (and functions that access them) that describe how an element's shape functions relate to those on parent or child cells (restriction or prolongation) or neighboring cells (for hanging node constraints), as well as to other finite element spaces defined on the same cell (e.g., when doing $p$ refinement).
      • Functions that describe the properties of individual shape functions, for example which vector components of a vector-valued finite element's shape function is nonzero, or whether an element is primitive.
      • For elements that are interpolatory, such as the common $Q_p$ Lagrange elements, data that describes where their support points are located.
      • Functions that define the interface to the FEValues class that is almost always used to access finite element shape functions from user code.
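The last point of the list above deserves a small sketch: user code almost never evaluates shape functions on a FiniteElement directly, but goes through FEValues. This is an illustrative setup only; a real program would assemble a matrix or vector in the inner loop.

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>

int main()
{
  using namespace dealii;

  Triangulation<2> triangulation;
  GridGenerator::hyper_cube(triangulation);

  const FE_Q<2> fe(2);
  DoFHandler<2> dof_handler(triangulation);
  dof_handler.distribute_dofs(fe);

  QGauss<2>   quadrature(3);
  FEValues<2> fe_values(fe, quadrature, update_values | update_gradients);

  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      fe_values.reinit(cell);
      for (unsigned int q = 0; q < quadrature.size(); ++q)
        for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
          {
            // Values and gradients of shape function i at quadrature
            // point q, mapped to the concrete cell:
            const double       value = fe_values.shape_value(i, q);
            const Tensor<1, 2> grad  = fe_values.shape_grad(i, q);
            (void)value;
            (void)grad;
          }
    }
}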
@@ -586,7 +586,7 @@
      21 1 0 8 1

      What we see is the following: there are a total of 22 degrees-of-freedom on this element with components ranging from 0 to 2. Each DoF corresponds to one of the two base elements used to build FESystem : $\mathbb Q_2$ or $\mathbb Q_1$. Since FE_Q are primitive elements, we have a total of 9 distinct scalar-valued shape functions for the quadratic element and 4 for the linear element. Finally, for DoFs corresponding to the first base element multiplicity is either zero or one, meaning that we use the same scalar valued $\mathbb Q_2$ for both $x$ and $y$ components of the velocity field $\mathbb Q_2 \otimes \mathbb Q_2$. For DoFs corresponding to the second base element multiplicity is zero.
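
      To see how such a combined element might be put together in user code, here is a minimal sketch; the element counts follow the numbers above, and the pairing itself is only an illustration:

      #include <deal.II/fe/fe_q.h>
      #include <deal.II/fe/fe_system.h>

      int main()
      {
        using namespace dealii;

        // Two copies of Q2 for the velocity components plus one Q1 for
        // the pressure: 2 * 9 + 4 = 22 degrees of freedom per cell in 2d.
        const FESystem<2> fe(FE_Q<2>(2), 2,   // multiplicity 2
                             FE_Q<2>(1), 1);  // multiplicity 1

        // fe.n_dofs_per_cell() == 22
      }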

      Support points

      Finite elements are frequently defined by defining a polynomial space and a set of dual functionals. If these functionals involve point evaluations, then the element is "interpolatory" and it is possible to interpolate an arbitrary (but sufficiently smooth) function onto the finite element space by evaluating it at these points. We call these points "support points".

      Most finite elements are defined by mapping from the reference cell to a concrete cell. Consequently, the support points are then defined on the reference ("unit") cell, see this glossary entry. The support points on a concrete cell can then be computed by mapping the unit support points, using the Mapping class interface and derived classes, typically via the FEValues class.
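
      A short sketch of querying the unit support points of an interpolatory element; mapping them to a concrete cell would then go through a Mapping object, typically via FEValues:

      #include <deal.II/fe/fe_q.h>

      int main()
      {
        using namespace dealii;

        const FE_Q<2> fe(1);

        // The unit support points live on the reference cell [0,1]^2;
        // for this Q1 element they are the four vertices.
        const std::vector<Point<2>> &unit_points = fe.get_unit_support_points();
        (void)unit_points;
      }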

      @@ -618,8 +618,8 @@

      Through this construction, the degrees of freedom on the child faces are constrained to the degrees of freedom on the parent face. The information so provided is typically consumed by the DoFTools::make_hanging_node_constraints() function.

      Note
      The hanging node constraints described by these matrices are only relevant to the case where the same finite element space is used on neighboring (but differently refined) cells. The case that the finite element spaces on different sides of a face are different, i.e., the $hp$ case (see hp-finite element support) is handled by separate functions. See the FiniteElement::get_face_interpolation_matrix() and FiniteElement::get_subface_interpolation_matrix() functions.

      Interpolation matrices in three dimensions

      For the interface constraints, the 3d case is similar to the 2d case. The numbering for the indices $n$ on the mother face is obvious and keeps to the usual numbering of degrees of freedom on quadrilaterals.

      The numbering of the degrees of freedom on the interior of the refined faces for the index $m$ is as follows: let $d_v$ and $d_l$ be as above, and $d_q$ be the number of degrees of freedom per quadrilateral (and therefore per face), then $m=0...d_v-1$ denote the dofs on the vertex at the center, $m=d_v...5d_v-1$ for the dofs on the vertices at the center of the bounding lines of the quadrilateral, $m=5d_v...5d_v+4*d_l-1$ are for the degrees of freedom on the four lines connecting the center vertex to the outer boundary of the mother face, $m=5d_v+4*d_l...5d_v+4*d_l+8*d_l-1$ for the degrees of freedom on the small lines surrounding the quad, and $m=5d_v+12*d_l...5d_v+12*d_l+4*d_q-1$ for the dofs on the four child faces. Note the direction of the lines at the boundary of the quads, as shown below.

      The order of the twelve lines and the four child faces can be extracted from the following sketch, where the overall order of the different dof groups is depicted:

      *    *--15--4--16--*
       *    |      |      |
       *    10 19  6  20  12
      @@ -660,7 +660,7 @@
       
    • Compute the basis $v_j$ of the finite element shape function space by applying $M^{-1}$ to the basis $w_j$.
      The matrix M may be computed with FETools::compute_node_matrix(). This function relies on the existence of generalized_support_points and FiniteElement::convert_generalized_support_point_values_to_dof_values() (see the glossary entry on generalized support points for more information). With this, one can then use the following piece of code in the constructor of a class derived from FiniteElement to compute the $M$ matrix:

      this->inverse_node_matrix.reinit(this->n_dofs_per_cell(),
                                       this->n_dofs_per_cell());
      this->inverse_node_matrix.invert(M);
      @@ -697,7 +697,7 @@
      \[
       R_1 = \left(\begin{matrix}0 & 0 \\ 0 & 1\end{matrix}\right).
      \]

      However, this approach already fails if we go to a $Q_2$ element with the following degrees of freedom:

      meshes: *-------* *----*----*
      local DoF numbers: 0 2 1 0 2 1|0 2 1
      global DoF numbers: 0 2 1 0 2 1 4 3

      Writing things as the sum over matrix operations as above would not easily work because we have to add nonzero values to $U^\text{coarse}_2$ twice, once for each child.

      @@ -2629,7 +2629,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -2713,7 +2713,7 @@
      scalar: An object that represents a single scalar vector component of this finite element.

      Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      Note
      This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      Parameters
      @@ -2967,8 +2967,8 @@
      component_mask: The mask that selects individual components of the finite element

      For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

      For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

      To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

      On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

      Parameters
      @@ -3004,11 +3004,11 @@
      Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

      In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

      The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the result of the FiniteElement::system_to_component_index() function's return value's first component. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

      Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

      Parameters
      [in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElementData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElementData.html 2024-12-27 18:25:04.024840915 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElementData.html 2024-12-27 18:25:04.028840943 +0000 @@ -332,8 +332,8 @@
      [in] dofs_per_object: A vector that describes the number of degrees of freedom on geometrical objects for each dimension. This vector must have size dim+1, and entry 0 describes the number of degrees of freedom per vertex, entry 1 the number of degrees of freedom per line, etc. As an example, for the common $Q_1$ Lagrange element in 2d, this vector would have elements (1,0,0). On the other hand, for a $Q_3$ element in 3d, it would have entries (1,2,4,8).
      [in] n_components: Number of vector components of the element.
      [in] degree: The maximal polynomial degree of any of the shape functions of this element in any variable on the reference element. For example, for the $Q_1$ element (in any space dimension), this would be one; this is so despite the fact that the element has a shape function of the form $\hat x\hat y$ (in 2d) and $\hat x\hat y\hat z$ (in 3d), which, although quadratic and cubic polynomials, are still only linear in each reference variable separately. The information provided by this variable is typically used in determining what an appropriate quadrature formula is.
      [in] conformity: A variable describing which Sobolev space this element conforms to. For example, the $Q_p$ Lagrange elements (implemented by the FE_Q class) are $H^1$ conforming, whereas the Raviart-Thomas element (implemented by the FE_RaviartThomas class) is $H_\text{div}$ conforming; finally, completely discontinuous elements (implemented by the FE_DGQ class) are only $L_2$ conforming.
      [in] block_indices: An argument that describes how the base elements of a finite element are grouped. The default value constructs a single block that consists of all dofs_per_cell degrees of freedom. This is appropriate for all "atomic" elements (including non-primitive ones) and these can therefore omit this argument. On the other hand, composed elements such as FESystem will want to pass a different value here.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteSizeHistory.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteSizeHistory.html 2024-12-27 18:25:04.056841135 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteSizeHistory.html 2024-12-27 18:25:04.060841162 +0000 @@ -142,7 +142,7 @@
      template<typename T>
      class FiniteSizeHistory< T >

      A helper class to store a finite-size collection of objects of type T. If the number of elements exceeds the specified maximum size of the container, the oldest element is removed. Additionally, random access and removal of elements are implemented. Indexing is done relative to the last added element.

      In order to optimize the container for usage with memory-demanding objects (e.g., linear algebra vectors), the removal of an element does not free the memory. Instead, the element is kept in a separate cache so that a subsequent addition does not require re-allocation of memory.

      The primary usage of this class is in solvers to store a history of vectors. That is, if at the iteration $k$ we store $m$ vectors from previous iterations $\{k-1,k-2,...,k-m\}$, then addition of the new element will make the object contain elements from iterations $\{k,k-1,k-2,...,k-m+1\}$.

      Definition at line 48 of file history.h.
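
      A minimal usage sketch, assuming the interface suggested by the description above (a constructor taking the maximum number of elements, an add() function, and operator[] indexed relative to the last added element):

      #include <deal.II/base/history.h>
      #include <deal.II/lac/vector.h>

      int main()
      {
        using namespace dealii;

        // Keep at most three solution vectors; adding a fourth evicts the
        // oldest one (whose memory is cached for reuse).
        FiniteSizeHistory<Vector<double>> history(3);

        for (unsigned int k = 0; k < 5; ++k)
          {
            Vector<double> v(10);
            v = static_cast<double>(k);
            history.add(v);
          }

        // history.size() == 3; history[0] refers to the most recently
        // added vector (an assumption about the indexing convention).
      }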

      Constructor & Destructor Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html 2024-12-27 18:25:04.100841437 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html 2024-12-27 18:25:04.104841464 +0000 @@ -473,7 +473,7 @@
      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

      Note
      If you use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold, do so by overloading the project_to_manifold() function.
      Parameters
      @@ -482,7 +482,7 @@
      Returns
      A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

      Reimplemented from Manifold< dim, dim >.
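
      A short sketch of the behavior just described; the specific points are arbitrary:

      #include <deal.II/grid/manifold.h>

      int main()
      {
        using namespace dealii;

        // On a flat manifold the geodesic between two points is a straight
        // line, so the tangent vector at x1 is simply x2 - x1.
        const FlatManifold<2> manifold;

        const Point<2>     x1(0.0, 0.0);
        const Point<2>     x2(1.0, 0.5);
        const Tensor<1, 2> t = manifold.get_tangent_vector(x1, x2); // (1.0, 0.5)
        (void)t;
      }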

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html 2024-12-27 18:25:04.160841849 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html 2024-12-27 18:25:04.164841876 +0000 @@ -1096,8 +1096,8 @@
      Return the $l_1$-norm of the matrix, where $||M||_1 = \max_j \sum_i |M_{ij}|$ (maximum of the sums over columns).

      @@ -1117,8 +1117,8 @@
      Return the $l_\infty$-norm of the matrix, where $||M||_\infty = \max_i \sum_j |M_{ij}|$ (maximum of the sums over rows).

      @@ -2071,7 +2071,7 @@

      A=Inverse(A). A must be a square matrix. Inversion of this matrix by Gauss-Jordan algorithm with partial pivoting. This process is well-behaved for positive definite matrices, but be aware of round-off errors in the indefinite case.

      In case deal.II was configured with LAPACK, the functions Xgetrf and Xgetri build an LU factorization and invert the matrix upon that factorization, providing best performance up to matrices with a few hundred rows and columns.

      The numerical effort to invert an $n \times n$ matrix is of the order $n^3$.

      @@ -2115,7 +2115,7 @@
      Assign the Cholesky decomposition $A=:L L^T$ of the given matrix $A$ to *this, where $L$ is a lower triangular matrix. The given matrix must be symmetric positive definite.

      ExcMatrixNotPositiveDefinite will be thrown in the case that the matrix is not positive definite.

      @@ -2139,7 +2139,7 @@

      *this(i,j) = $V(i) W(j)$ where $V,W$ are vectors of the same length.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html 2024-12-27 18:25:04.216842234 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html 2024-12-27 18:25:04.220842261 +0000 @@ -256,14 +256,14 @@

      Functions that return tensors

      If the functions you are dealing with have a number of components that are a priori known (for example, dim elements), you might consider using the TensorFunction class instead. This is, in particular, true if the objects you return have the properties of a tensor, i.e., they are for example dim-dimensional vectors or dim-by-dim matrices. On the other hand, functions like VectorTools::interpolate or VectorTools::interpolate_boundary_values definitely only want objects of the current type. You can use the VectorFunctionFromTensorFunction class to convert the former to the latter.

      Functions that return vectors of other data types

      Most of the time, your functions will have the form $f : \Omega \rightarrow {\mathbb R}^{n_\text{components}}$. However, there are occasions where you want the function to return vectors (or scalars) over a different number field, for example functions that return complex numbers or vectors of complex numbers: $f : \Omega \rightarrow {\mathbb C}^{n_\text{components}}$. In such cases, you can choose a value different than the default double for the second template argument of this class: it describes the scalar type to be used for each component of your return values. It defaults to double, but in the example above, it could be set to std::complex<double>. step-58 is an example of this.
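
      A minimal sketch of such a complex-valued function; the class name PlaneWave and the particular formula are illustrative only:

      #include <deal.II/base/function.h>
      #include <complex>

      // A scalar complex-valued function f(x) = exp(i x_0), using the
      // second template argument discussed above to select
      // std::complex<double> as the scalar type.
      template <int dim>
      class PlaneWave : public dealii::Function<dim, std::complex<double>>
      {
      public:
        virtual std::complex<double>
        value(const dealii::Point<dim> &p,
              const unsigned int /*component*/ = 0) const override
        {
          return std::exp(std::complex<double>(0., p[0]));
        }
      };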

      Template Parameters
      dim: The space dimension of the range space within which the domain $\Omega$ of the function lies. Consequently, the function will be evaluated at objects of type Point<dim>.
      RangeNumberType: The scalar type of the vector space that is the range (or image) of this function. As discussed above, objects of the current type represent functions from ${\mathbb R}^\text{dim}$ to $S^{n_\text{components}}$ where $S$ is the underlying scalar type of the vector space. The type of $S$ is given by the RangeNumberType template argument.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionDerivative.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionDerivative.html 2024-12-27 18:25:04.272842618 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionDerivative.html 2024-12-27 18:25:04.272842618 +0000 @@ -360,27 +360,27 @@

      Names of difference formulas.

      Enumerator
      Euler 

      The symmetric Euler formula of second order:

\[
 u'(t) \approx \frac{u(t+h) - u(t-h)}{2h}.
\]

      UpwindEuler 

      The upwind Euler formula of first order:

\[
 u'(t) \approx \frac{u(t) - u(t-h)}{h}.
\]

      FourthOrder 

      The fourth order scheme

\[
 u'(t) \approx \frac{u(t-2h) - 8u(t-h) + 8u(t+h) - u(t+2h)}{12h}.
\]

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html 2024-12-27 18:25:04.324842976 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html 2024-12-27 18:25:04.328843003 +0000 @@ -553,7 +553,7 @@
      Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the sub_manifold coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

      This function is used in the computations required by the get_tangent_vector() function. The default implementation calls the get_gradient() method of the FunctionManifold::push_forward_function() member class. If you construct this object using the constructor that takes two string expressions, then the default implementation of this method uses a finite difference scheme to compute the gradients (see the AutoDerivativeFunction() class for details), and you can specify the size of the spatial step size at construction time with the h parameter.

      Refer to the general documentation of this class for more information.

      @@ -720,24 +720,24 @@
      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

      For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
\end{align*}

      In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

      What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right]\right)
                \right|_{t=0}
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
\end{align*}

      This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

      Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

      Parameters
      x1: The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionParser.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionParser.html 2024-12-27 18:25:04.388843415 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionParser.html 2024-12-27 18:25:04.396843470 +0000 @@ -522,27 +522,27 @@

      Names of difference formulas.

      Enumerator
      Euler 

      The symmetric Euler formula of second order:

\[
 u'(t) \approx \frac{u(t+h) - u(t-h)}{2h}.
\]

      UpwindEuler 

      The upwind Euler formula of first order:

\[
 u'(t) \approx \frac{u(t) - u(t-h)}{h}.
\]

      FourthOrder 

      The fourth order scheme

\[
 u'(t) \approx \frac{u(t-2h) - 8u(t-h) + 8u(t+h) - u(t+2h)}{12h}.
\]

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html 2024-12-27 18:25:04.448843827 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html 2024-12-27 18:25:04.452843855 +0000 @@ -228,7 +228,7 @@

      Detailed Description

      template<int dim>
      class Functions::CoordinateRestriction< dim >

      This class takes a function in dim + 1 dimensions and creates a new function in one dimension lower by restricting one of the coordinates to a given value. Mathematically this corresponds to taking a function $f = f(x, y, z)$, a fixed value, $Z$, and defining a new function (the restriction) $g = g(x, y) = f(x, y, Z)$. Using this class, this translates to

      double z = ...
      unsigned int restricted_direction = 2;
      @@ -236,7 +236,7 @@
      The dim-dimensional coordinates on the restriction are ordered starting from the restricted (dim + 1)-coordinate. In particular, this means that if the $y$-coordinate is locked to $Y$ in 3d, the coordinates are ordered as $(z, x)$ on the restriction: $g = g(z, x) = f(x, Y, z)$. This is the same convention as in BoundingBox::cross_section.

      Definition at line 50 of file function_restriction.h.
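
      Completing the truncated snippet above into a self-contained sketch; the constructor argument order (function, direction, value) and the use of Functions::CosineFunction as the (dim + 1)-dimensional function are assumptions:

      #include <deal.II/base/function_lib.h>
      #include <deal.II/base/function_restriction.h>

      int main()
      {
        using namespace dealii;

        // Restrict a 3d function to the plane z = 0.5; the result is the
        // 2d function g(x, y) = f(x, y, 0.5).
        const Functions::CosineFunction<3> f;
        const unsigned int                 restricted_direction = 2;
        const double                       z = 0.5;

        const Functions::CoordinateRestriction<2> g(f, restricted_direction, z);

        const double value = g.value(Point<2>(0.25, 0.25));
        (void)value;
      }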

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html 2024-12-27 18:25:04.496844157 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html 2024-12-27 18:25:04.500844184 +0000 @@ -243,7 +243,7 @@ ... evaluating the function at a point $\mathbf x=(x,y,z)$ will find the box so that $x_k\le x\le x_{k+1}, y_l\le y\le y_{l+1}, z_m\le z\le z_{m+1}$, and do a trilinear interpolation of the data on this cell. Similar operations are done in lower dimensions.

      This class is most often used for either evaluating coefficients or right hand sides that are provided experimentally at a number of points inside the domain, or for comparing outputs of a solution on a finite element mesh against previously obtained data defined on a grid.

      Note
      If the points $x_i$ are actually equally spaced on an interval $[x_0,x_1]$ and the same is true for the other data points in higher dimensions, you should use the InterpolatedUniformGridData class instead.

      If a point is requested outside the box defined by the end points of the coordinate arrays, then the function is assumed to simply extend by constant values beyond the last data point in each coordinate direction. (The class does not throw an error if a point lies outside the box since it frequently happens that a point lies just outside the box by an amount on the order of numerical roundoff.)

      Note
      The use of the related class InterpolatedUniformGridData is discussed in step-53.

      Dealing with large data sets

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html 2024-12-27 18:25:04.552844541 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html 2024-12-27 18:25:04.556844569 +0000 @@ -235,7 +235,7 @@ class Functions::InterpolatedUniformGridData< dim >

      A scalar function that computes its values by (bi-, tri-)linear interpolation from a set of point data that are arranged on a uniformly spaced tensor product mesh. In other words, considering the three-dimensional case, let there be points $x_0,\ldots, x_{K-1}$ that result from a uniform subdivision of the interval $[x_0,x_{K-1}]$ into $K-1$ sub-intervals of size $\Delta x = (x_{K-1}-x_0)/(K-1)$, and similarly $y_0,\ldots,y_{L-1}$, $z_1,\ldots,z_{M-1}$. Also consider data $d_{klm}$ defined at point $(x_k,y_l,z_m)^T$, then evaluating the function at a point $\mathbf x=(x,y,z)$ will find the box so that $x_k\le x\le x_{k+1}, y_l\le y\le y_{l+1}, z_m\le z\le z_{m+1}$, and do a trilinear interpolation of the data on this cell. Similar operations are done in lower dimensions.

      This class is most often used for either evaluating coefficients or right hand sides that are provided experimentally at a number of points inside the domain, or for comparing outputs of a solution on a finite element mesh against previously obtained data defined on a grid.

      Note
      If you have a problem where the points $x_i$ are not equally spaced (e.g., they result from a computation on a graded mesh that is denser closer to one boundary), then use the InterpolatedTensorProductGridData class instead.

      If a point is requested outside the box defined by the end points of the coordinate arrays, then the function is assumed to simply extend by constant values beyond the last data point in each coordinate direction. (The class does not throw an error if a point lies outside the box since it frequently happens that a point lies just outside the box by an amount on the order of numerical roundoff.)

      Note
      The use of this class is discussed in step-53.
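
      A minimal one-dimensional sketch, assuming the constructor takes the interval endpoints, the number of sub-intervals, and the data table, as suggested by the description above and its use in step-53:

      #include <deal.II/base/function_lib.h>
      #include <deal.II/base/table.h>
      #include <array>
      #include <utility>

      int main()
      {
        using namespace dealii;

        // Five equally spaced samples of x^2 on [0, 1] (four sub-intervals).
        Table<1, double> data(5);
        for (unsigned int i = 0; i < 5; ++i)
          data(i) = (0.25 * i) * (0.25 * i);

        const std::array<std::pair<double, double>, 1> endpoints{{{0., 1.}}};
        const std::array<unsigned int, 1>              n_subintervals{{4}};

        const Functions::InterpolatedUniformGridData<1> f(endpoints,
                                                          n_subintervals,
                                                          data);

        const double v = f.value(Point<1>(0.3)); // linear interpolation
        (void)v;
      }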

      Dealing with large data sets

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html 2024-12-27 18:25:04.600844871 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html 2024-12-27 18:25:04.604844898 +0000 @@ -229,13 +229,13 @@

      Detailed Description

      A function that solves the Laplace equation (with specific boundary values but zero right hand side) and that has a singularity at the center of the L-shaped domain in 2d (i.e., at the location of the re-entrant corner of this non-convex domain).

      The function is given in polar coordinates by $r^{\frac{2}{3}} \sin(\frac{2}{3} \phi)$ with a singularity at the origin and should be used with GridGenerator::hyper_L(). Here, $\phi$ is defined as the clockwise angle against the positive $x$-axis.

      This function is often used to illustrate that the solutions of the Laplace equation

      \[
   -\Delta u = 0
 \]

      can be singular even if the boundary values are smooth. (Here, if the domain is the L-shaped domain $(-1,1)^2 \backslash [0,1]^2$, the boundary values for $u$ are zero on the two line segments adjacent to the origin, and equal to $r^{\frac{2}{3}} \sin(\frac{2}{3} \phi)$ on the remaining parts of the boundary.) The function itself remains bounded on the domain, but its gradient is of the form $r^{-1/3}$ in the vicinity of the origin and consequently diverges as one approaches the origin.

      Definition at line 409 of file function_lib.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1ParsedFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1ParsedFunction.html 2024-12-27 18:25:04.660845283 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1ParsedFunction.html 2024-12-27 18:25:04.668845338 +0000 @@ -392,27 +392,27 @@

      Names of difference formulas.

      Enumerator
      Euler 

      The symmetric Euler formula of second order:

\[
 u'(t) \approx \frac{u(t+h) - u(t-h)}{2h}.
\]

      UpwindEuler 

      The upwind Euler formula of first order:

\[
 u'(t) \approx \frac{u(t) - u(t-h)}{h}.
\]

      FourthOrder 

      The fourth order scheme

\[
 u'(t) \approx \frac{u(t-2h) - 8u(t-h) + 8u(t+h) - u(t+2h)}{12h}.
\]

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html 2024-12-27 18:25:04.712845640 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html 2024-12-27 18:25:04.716845667 +0000 @@ -231,7 +231,7 @@

      Detailed Description

      template<int dim>
      class Functions::PointRestriction< dim >

      This class creates a 1-dimensional function from a dim + 1 dimensional function by restricting dim of the coordinate values to a given point. Mathematically this corresponds to taking a function, $f = f(x, y, z)$, and a point $(Y, Z)$, and defining a new function $g = g(x) = f(x, Y, Z)$. Using this class, this translates to

      Point<2> point(y, z);
      unsigned int open_direction = 0;
      @@ -240,7 +240,7 @@
      The coordinates of the point will be expanded in the higher-dimensional functions coordinates starting from the open-direction (and wrapping around). In particular, if we restrict to a point $(Z, X)$ and choose to keep the y-direction open, the restriction that is created is the function $g(y) = f(X, y, Z)$. This is consistent with the convention in BoundingBox::cross_section.

      Definition at line 109 of file function_restriction.h.

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html 2024-12-27 18:25:04.764845997 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html 2024-12-27 18:25:04.772846052 +0000 @@ -332,8 +332,8 @@

      Constructor. The coefficients and the exponents of the polynomial are passed as arguments. The Table<2, double> exponents has a number of rows equal to the number of monomials of the polynomial and a number of columns equal to dim. The i-th row of the exponents table contains the ${\alpha_{i,d}}$ exponents of the i-th monomial $a_{i}\prod_{d=1}^{dim} x_{d}^{\alpha_{i,d}}$. The i-th element of the coefficients vector contains the coefficient $a_{i}$ for the i-th monomial.

      Definition at line 2837 of file function_lib.cc.
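
      A short sketch of constructing such a polynomial; the argument order (exponents, coefficients) is an assumption consistent with the description above:

      #include <deal.II/base/function_lib.h>
      #include <deal.II/base/table.h>
      #include <vector>

      int main()
      {
        using namespace dealii;

        // p(x, y) = 2 x^2 y + 3 x, written as two monomials.
        Table<2, double> exponents(2, 2); // two monomials, dim = 2 columns
        exponents(0, 0) = 2; exponents(0, 1) = 1; // x^2 y
        exponents(1, 0) = 1; exponents(1, 1) = 0; // x

        const std::vector<double> coefficients = {2., 3.};

        const Functions::Polynomial<2> p(exponents, coefficients);
        const double v = p.value(Point<2>(1., 2.)); // 2*1*2 + 3*1 = 7
        (void)v;
      }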

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1RayleighKotheVortex.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1RayleighKotheVortex.html 2024-12-27 18:25:04.812846327 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1RayleighKotheVortex.html 2024-12-27 18:25:04.816846354 +0000 @@ -226,7 +226,7 @@

      Detailed Description

      template<int dim>
      class Functions::RayleighKotheVortex< dim >

      A class that represents a time-dependent function object for a Rayleigh–Kothe vortex vector field. This is generally used as flow pattern in complex test cases for interface tracking methods (e.g., volume-of-fluid and level-set approaches) since it leads to strong rotation and elongation of the fluid [Blais2013].

      The stream function $\Psi$ of this Rayleigh-Kothe vortex is defined as:

      \[
 \Psi = \frac{1}{\pi} \sin^2 (\pi x) \sin^2 (\pi y) \cos \left( \pi
 \frac{t}{T} \right)
/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html	2024-12-27 18:25:04.880846794 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html	2024-12-27 18:25:04.872846739 +0000
@@ -238,11 +238,11 @@

      template<int dim>
      class Functions::SignedDistance::Ellipsoid< dim >

      Signed-distance level set function to an ellipsoid defined by:

\[
 \sum_{i=1}^{dim} \frac{(x_i - c_i)^2}{R_i^2} = 1
\]

      Here, $c_i$ are the coordinates of the center of the ellipsoid and $R_i$ are the elliptic radii. This function is zero on the ellipsoid, negative inside the ellipsoid and positive outside the ellipsoid.

      Definition at line 144 of file function_signed_distance.h.

      Member Typedef Documentation

      @@ -462,9 +462,9 @@

      Evaluates the ellipsoid function:

\[
 f(\vec{x}) = \sum_{i=1}^{dim} \frac{(x_i - c_i)^2}{R_i^2} - 1
\]

      Definition at line 200 of file function_signed_distance.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html 2024-12-27 18:25:04.924847096 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html 2024-12-27 18:25:04.928847123 +0000 @@ -226,7 +226,7 @@

      Detailed Description

      template<int dim>
      class Functions::SignedDistance::Plane< dim >

      Signed level set function of a plane in $\mathbb{R}^{dim}$: $\psi(x) = n \cdot (x - x_p)$. Here, $n$ is the plane normal and $x_p$ is a point in the plane. Thus, with respect to the direction of the normal, this function is positive above the plane, zero in the plane, and negative below the plane. If the normal is normalized, $\psi$ will be the signed distance to the closest point in the plane.

      Definition at line 104 of file function_signed_distance.h.
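
      A short sketch of the sign convention, assuming the constructor takes a point in the plane and the normal:

      #include <deal.II/base/function_signed_distance.h>

      using namespace dealii;

      void plane_example()
      {
        // The x-axis as a "plane" in 2d, with normal pointing in the +y direction.
        Tensor<1, 2> normal;
        normal[1] = 1.0;
        const Functions::SignedDistance::Plane<2> plane(Point<2>(0.0, 0.0), normal);

        const double above = plane.value(Point<2>(0.5, 1.0));  // +1: above the plane
        const double below = plane.value(Point<2>(0.5, -2.0)); // -2: below the plane
      }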

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html	2024-12-27 18:25:04.980847480 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html	2024-12-27 18:25:04.984847508 +0000
@@ -226,7 +226,7 @@

      Detailed Description

      template<int dim>
      class Functions::SignedDistance::Rectangle< dim >

      Signed-distance level set function of a rectangle.

      This function is zero on the rectangle, negative "inside" and positive in the rest of $\mathbb{R}^{dim}$.

      Contour surfaces of the signed distance function of a 3D rectangle are illustrated below:

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html	2024-12-27 18:25:05.032847837 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html	2024-12-27 18:25:05.036847865 +0000
@@ -226,9 +226,9 @@

      Detailed Description

      template<int dim>
      class Functions::SignedDistance::Sphere< dim >

      Signed-distance level set function of a sphere: $\psi(x) = \| x - x^c \| - R$. Here, $x^c$ is the center of the sphere and $R$ is its radius. This function is thus zero on the sphere, negative "inside" the ball having the sphere as its boundary, and positive in the rest of $\mathbb{R}^{dim}$.

      This function has gradient and Hessian equal to $\partial_i \psi(x) = (x_i - x_i^c)/\| x - x^c \|$ and $\partial_i \partial_j \psi = \delta_{ij}/\| x - x^c \| - (x_i - x_i^c)(x_j - x_j^c)/\| x - x^c \|^3$, where $\delta_{ij}$ is the Kronecker delta.

      Definition at line 48 of file function_signed_distance.h.
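
      A small sketch checking the stated value and gradient formulas on the unit circle (constructor arguments, center and radius, as described above):

      #include <deal.II/base/function_signed_distance.h>

      using namespace dealii;

      void sphere_example()
      {
        const Functions::SignedDistance::Sphere<2> sphere(Point<2>(0.0, 0.0), 1.0);

        const Point<2> p(2.0, 0.0);                // one unit outside the circle
        const double       d = sphere.value(p);    // 1.0
        const Tensor<1, 2> g = sphere.gradient(p); // (1, 0): the radial direction
      }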

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html	2024-12-27 18:25:05.088848222 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html	2024-12-27 18:25:05.092848249 +0000
@@ -226,8 +226,8 @@

      Detailed Description

      template<int dim>
      class Functions::SignedDistance::ZalesakDisk< dim >

      Signed-distance level set function of Zalesak's disk proposed in [zalesak1979fully].

      It is calculated by the set difference $\psi(x) = \max(\psi_{S}(x), -\psi_{N}(x))$ of the level set functions of a sphere $\psi_{S}$ and a rectangle $\psi_{N}$. This function is zero on the surface of the disk, negative "inside" and positive in the rest of $\mathbb{R}^{dim}$.

      Contour surfaces of the signed distance function of a 3D Zalesak's disk are illustrated below:

      /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html	2024-12-27 18:25:05.144848606 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html	2024-12-27 18:25:05.148848634 +0000
@@ -275,7 +275,7 @@

      Detailed Description

      A singular solution to Stokes' equations on a 2d L-shaped domain.

      This function satisfies $-\triangle \mathbf{u} + \nabla p = 0$ and represents a typical singular solution around a reentrant corner of an L-shaped domain that can be created using GridGenerator::hyper_L(). The velocity vanishes on the two faces of the re-entrant corner and $\nabla\mathbf{u}$ and $p$ are singular at the origin while they are smooth in the rest of the domain because they can be written as a product of a smooth function and the term $r^{\lambda-1}$ where $r$ is the radius and $\lambda \approx 0.54448$ is a fixed parameter.

      Taken from Houston, Schötzau, Wihler, proceeding ENUMATH 2003.

      Definition at line 245 of file flow_function.h.

      @@ -1737,7 +1737,7 @@
      The exponent of the radius, computed as the solution to $\sin(\lambda\omega)+\lambda \sin(\omega)=0$.

      Definition at line 282 of file flow_function.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html	2024-12-27 18:25:05.196848963 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html	2024-12-27 18:25:05.204849018 +0000
@@ -920,7 +920,7 @@
      Returns
      This function returns a struct containing some extra data stored by the ExodusII file that cannot be loaded into a Triangulation - see ExodusIIData for more information.
      A cell face in ExodusII can be in an arbitrary number of sidesets (i.e., it can have an arbitrary number of sideset ids) - however, a boundary cell face in deal.II has exactly one boundary id. All boundary faces that are not in a sideset are given the (default) boundary id of $0$. This function then groups sidesets together into unique sets and gives each one a boundary id. For example: Consider a single-quadrilateral mesh whose left side has no sideset id, right side has sideset ids $0$ and $1$, and whose bottom and top sides have sideset ids of $0$. The left face will have a boundary id of $0$, the top and bottom faces boundary ids of $1$, and the right face a boundary id of $2$. Hence the vector returned by this function in that case will be $\{\{\}, \{0\}, \{0, 1\}\}$.

      Definition at line 3772 of file grid_in.cc.
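
      A hedged sketch of how the grouping described above is typically consumed (the filename is a placeholder, the member name id_to_sideset_ids is assumed from the ExodusIIData description, and reading ExodusII requires deal.II to be configured with SEACAS support):

      #include <deal.II/grid/grid_in.h>
      #include <deal.II/grid/tria.h>

      using namespace dealii;

      void read_exodus_mesh()
      {
        Triangulation<3> triangulation;
        GridIn<3>        grid_in;
        grid_in.attach_triangulation(triangulation);

        const auto exodus_data = grid_in.read_exodusii("mesh.e");

        // exodus_data.id_to_sideset_ids[b] lists the sidesets that were grouped
        // into boundary id b, e.g. {{}, {0}, {0, 1}} in the example above.
      }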

      /usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html	2024-12-27 18:25:05.228849183 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html	2024-12-27 18:25:05.236849238 +0000
@@ -150,10 +150,10 @@

      Detailed Description

      template<typename number>
      class Householder< number >

      QR-decomposition of a full matrix.

      This class computes the QR-decomposition of a given matrix by the Householder algorithm. Then, the function least_squares() can be used to compute the vector $x$ minimizing $\|Ax-b\|$ for a given vector $b$. The QR decomposition of $A$ is useful for this purpose because the minimizer is given by the equation $x=(A^TA)^{-1}A^Tb=(R^TQ^TQR)^{-1}R^TQ^Tb$ which is easy to compute because $Q$ is an orthogonal matrix, and consequently $Q^TQ=I$. Thus, $x=(R^TR)^{-1}R^TQ^Tb=R^{-1}R^{-T}R^TQ^Tb=R^{-1}Q^Tb$. Furthermore, $R$ is triangular, so applying $R^{-1}$ to a vector only involves a backward or forward solve.

      Implementation details

      The class does not in fact store the $Q$ and $R$ factors explicitly as matrices. It does store $R$, but the $Q$ factor is stored as the product of Householder reflections of the form $Q_i = I-v_i v_i^T$, where the vectors $v_i$ are chosen so that they can be stored in the lower-triangular part of an underlying matrix object, whereas $R$ is stored in the upper triangular part.

      The $v_i$ vectors and the $R$ matrix now are in conflict because they both want to use the diagonal entry of the matrix, but we can only store one in these positions, of course. Consequently, the entries $(v_i)_i$ are stored separately in the diagonal member variable.

      Note
      Instantiations for this template are provided for <float> and <double>; others can be generated in application programs (see the section on Template instantiations in the manual).

      Definition at line 79 of file householder.h.
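
      A minimal least-squares sketch with a small overdetermined system, using the constructor from a FullMatrix and least_squares() as described above:

      #include <deal.II/lac/full_matrix.h>
      #include <deal.II/lac/householder.h>
      #include <deal.II/lac/vector.h>

      using namespace dealii;

      void least_squares_example()
      {
        FullMatrix<double> A(3, 2); // 3 equations, 2 unknowns
        A(0, 0) = 1; A(0, 1) = 1;
        A(1, 0) = 1; A(1, 1) = 2;
        A(2, 0) = 1; A(2, 1) = 3;

        Vector<double> b(3), x(2);
        b(0) = 1; b(1) = 2; b(2) = 2;

        Householder<double> qr(A);
        const double residual_norm = qr.least_squares(x, b); // x minimizes ||Ax-b||
      }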

      /usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html	2024-12-27 18:25:05.260849403 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html	2024-12-27 18:25:05.264849430 +0000
@@ -147,13 +147,13 @@

      Detailed Description

      Implementation of a simple class representing the identity matrix of a given size, i.e. a matrix with entries $A_{ij}=\delta_{ij}$. While it has the most important ingredients of a matrix, in particular that one can ask for its size and perform matrix-vector products with it, a matrix of this type is really only useful in two contexts: preconditioning and initializing other matrices.

      Initialization

      The main usefulness of this class lies in its ability to initialize other matrices, like this:

      FullMatrix<double> identity (IdentityMatrix(10));
      This creates a $10\times 10$ matrix with ones on the diagonal and zeros everywhere else. Most matrix types, in particular FullMatrix and SparseMatrix, have conversion constructors and assignment operators for IdentityMatrix, and can therefore be filled rather easily with identity matrices.

      Preconditioning

      No preconditioning at all is equivalent to preconditioning with the identity matrix. deal.II has a specialized class for this purpose, PreconditionIdentity, that can be used in a context as shown in the documentation of that class. The present class can be used in much the same way, although without any additional benefit:

      SolverControl solver_control (1000, 1e-12);
      SolverCG<> cg (solver_control);
      cg.solve (A, x, b, IdentityMatrix(A.m())); // identity matrix as preconditioner
      /usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html	2024-12-27 18:25:05.292849623 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html	2024-12-27 18:25:05.300849677 +0000
@@ -181,7 +181,7 @@

      Detailed Description

      template<typename VectorType>
      class ImplicitQR< VectorType >

      A class to obtain the triangular $R$ matrix of the $A=QR$ factorization together with the matrix $A$ itself. The orthonormal matrix $Q$ is not stored explicitly, hence the name of the class. The multiplication with $Q$ can be represented as $Q=A R^{-1}$, whereas the multiplication with $Q^T$ is given by $Q^T=R^{-T}A^T$.

      The class is designed to update a given (possibly empty) QR factorization due to the addition of a new column vector. This is equivalent to constructing an orthonormal basis by the Gram-Schmidt procedure. The class also provides update functionality when the column is removed.

      The VectorType template argument may be either a parallel or a serial vector, and only needs to have basic operations such as additions, scalar products, etc. It also needs to have a copy constructor.
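
      A brief sketch of the column-by-column update described above:

      #include <deal.II/lac/qr.h>
      #include <deal.II/lac/vector.h>

      using namespace dealii;

      void implicit_qr_example()
      {
        ImplicitQR<Vector<double>> qr;

        Vector<double> column(3);
        column(0) = 1.0; column(2) = 1.0;

        // One Gram-Schmidt step; the return value indicates whether the column
        // was accepted (it may be rejected as linearly dependent).
        const bool accepted = qr.append_column(column);
      }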

      @@ -346,7 +346,7 @@
      Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

      Implements BaseQR< VectorType >.

      @@ -380,7 +380,7 @@
      Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

      Implements BaseQR< VectorType >.

      @@ -414,7 +414,7 @@
      Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

      Implements BaseQR< VectorType >.

      @@ -448,7 +448,7 @@
      Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

      Implements BaseQR< VectorType >.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html	2024-12-27 18:25:05.356850062 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html	2024-12-27 18:25:05.360850089 +0000
@@ -863,7 +863,7 @@
      Return whether the IndexSets are ascending with respect to MPI process number and 1:1, i.e., each index is contained in exactly one IndexSet (among those stored on the different processes), each process stores a contiguous subset of indices, and the index set on process $p+1$ starts at the index one larger than the last one stored on process $p$. In case there is only one MPI process, this just means that the IndexSet is complete.

      Definition at line 1130 of file index_set.cc.

      @@ -1158,7 +1158,7 @@

      This command takes a "mask", i.e., a second index set of the same size as the current one, and returns the intersection of the current index set with the mask, shifted to the index of an entry within the given mask. For example, if the current object is an IndexSet object representing an index space [0,100) containing indices [20,40), and if the mask represents an index space of the same size but containing all 50 odd indices in this range, then the result will be an index set for a space of size 50 that contains those indices that correspond to the question "the how many'th entry in the mask are the indices [20,40)?". This will result in an index set of size 50 that contains the indices {11,12,13,14,15,16,17,18,19,20} (because, for example, the index 20 in the original set is not in the mask, but 21 is and corresponds to the 11th entry of the mask – the mask contains the elements {1,3,5,7,9,11,13,15,17,19,21,...}).

      In other words, the result of this operation is the intersection of the set represented by the current object and the mask, as seen within the mask. This corresponds to the notion of a view: The mask is a window through which we see the set represented by the current object.

      A typical case where this function is useful is as follows. Say, you have a block linear system in which you have blocks corresponding to variables $(u,p,T,c)$ (which you can think of as velocity, pressure, temperature, and chemical composition – or whatever other kind of problem you are currently considering in your own work). We solve this in parallel, so every MPI process has its own locally_owned_dofs index set that describes which among all $N_\text{dofs}$ degrees of freedom this process owns. Let's assume we have developed a linear solver or preconditioner that first solves the coupled $u$- $T$ system, and once that is done, solves the $p$- $c$ system. In this case, it is often useful to set up block vectors with only two components corresponding to the $u$ and $T$ components, and later for only the $p$- $c$ components of the solution. The question is which of the components of these 2-block vectors are locally owned? The answer is that we need to get a view of the locally_owned_dofs index set in which we apply a mask that corresponds to the variables we're currently interested in. For the $u$- $T$ system, we need a mask (corresponding to an index set of size $N_\text{dofs}$) that contains all indices of $u$ degrees of freedom as well as all indices of $T$ degrees of freedom. The resulting view is an index set of size $N_u+N_T$ that contains the indices of the locally owned $u$ and $T$ degrees of freedom.

      Definition at line 308 of file index_set.cc.
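
      The [20,40)-within-odd-mask example from the text, as a sketch:

      #include <deal.II/base/index_set.h>

      using namespace dealii;

      void get_view_example()
      {
        IndexSet set(100);
        set.add_range(20, 40); // indices [20,40)

        IndexSet mask(100);
        for (unsigned int i = 1; i < 100; i += 2)
          mask.add_index(i); // the 50 odd indices

        // The view lives in an index space of size 50 (the number of mask
        // entries) and contains {11,...,20}, as explained above.
        const IndexSet view = set.get_view(mask);
      }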

      @@ -1198,7 +1198,7 @@
      Remove all elements contained in other from this set. In other words, if $x$ is the current object and $o$ the argument, then we compute $x \leftarrow x \backslash o$.

      Definition at line 473 of file index_set.cc.
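
      A two-line sketch of the set difference $x \leftarrow x \backslash o$:

      #include <deal.II/base/index_set.h>

      using namespace dealii;

      void subtract_set_example()
      {
        IndexSet x(100);
        x.add_range(0, 10); // x = {0,...,9}

        IndexSet o(100);
        o.add_range(5, 20); // o = {5,...,19}

        x.subtract_set(o);  // x = {0,...,4}
      }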

      @@ -1943,7 +1943,7 @@
      Create and return an index set of size $N$ that contains every single index within this range. In essence, this function returns an index set created by

      IndexSet is (N);
      is.add_range(0, N);

      This function exists so that one can create and initialize index sets that are complete in one step, or so one can write code like

      if (my_index_set == complete_index_set(my_index_set.size()))
      /usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html	2024-12-27 18:25:05.400850364 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html	2024-12-27 18:25:05.404850392 +0000
@@ -1228,7 +1228,7 @@
      If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

      Definition at line 317 of file polynomial.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html	2024-12-27 18:25:05.500851051 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html	2024-12-27 18:25:05.508851106 +0000
@@ -3861,7 +3861,7 @@
      The matrix $\mathbf U$ in the singular value decomposition $\mathbf U \cdot \mathbf S \cdot \mathbf V^T$.

      Definition at line 988 of file lapack_full_matrix.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html	2024-12-27 18:25:05.548851381 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html	2024-12-27 18:25:05.556851436 +0000
@@ -934,7 +934,7 @@
      Return the square of the $l_2$-norm.

      Definition at line 492 of file cuda_vector.cc.

      @@ -988,7 +988,7 @@
      this->add(a, V);
      return_value = *this * W;

      The reason this function exists is that this operation involves less memory transfer than calling the two functions separately. This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      Definition at line 531 of file cuda_vector.cc.
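
      The fused operation described above is available on deal.II's vector classes as add_and_dot(); a sketch with the serial Vector class:

      #include <deal.II/lac/vector.h>

      using namespace dealii;

      void add_and_dot_example()
      {
        Vector<double> u(3), v(3), w(3);
        u = 1.0; v = 2.0; w = 3.0;

        // One pass over memory: u += 0.5*v, then return u*w.
        const double result = u.add_and_dot(0.5, v, w); // (1 + 0.5*2)*3*3 = 18
      }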

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1EpetraWrappers_1_1Vector.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1EpetraWrappers_1_1Vector.html	2024-12-27 18:25:05.608851793 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1EpetraWrappers_1_1Vector.html	2024-12-27 18:25:05.612851820 +0000
@@ -1129,7 +1129,7 @@

      The reason this function exists is that this operation involves less memory transfer than calling the two functions separately. This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

      The vectors need to have the same layout.

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      Definition at line 521 of file trilinos_epetra_vector.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html	2024-12-27 18:25:05.668852205 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html	2024-12-27 18:25:05.672852232 +0000
@@ -323,7 +323,7 @@

      Detailed Description

      template<typename Number>
      class LinearAlgebra::ReadWriteVector< Number >

      ReadWriteVector is intended to represent vectors in ${\mathbb R}^N$ for which it stores all or a subset of elements. The latter case is important in parallel computations, where $N$ may be so large that no processor can actually store all elements of a solution vector, but where this is also not necessary: one typically only has to store the values of degrees of freedom that live on cells that are locally owned plus potentially those degrees of freedom that live on ghost cells.

      This class provides access to individual elements to be read or written. However, it does not allow global operations such as taking the norm or dot products between vectors.

      Storing elements

      Most of the time, one will simply read from or write into a vector of the current class using the global numbers of these degrees of freedom. This is done using operator()() or operator[]() which call global_to_local() to transform the global index into a local one. In such cases, it is clear that one can only access elements of the vector that the current object indeed stores.
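
      A sketch of storing and accessing a subset of a global index space (using the constructor from an IndexSet):

      #include <deal.II/base/index_set.h>
      #include <deal.II/lac/read_write_vector.h>

      using namespace dealii;

      void read_write_vector_example()
      {
        IndexSet stored(100);
        stored.add_range(20, 40); // store only the entries [20,40)

        LinearAlgebra::ReadWriteVector<double> rwv(stored);
        rwv(25) = 3.14; // fine: global index 25 is among the stored elements
        // rwv(50) would fail: index 50 is not stored by this object
      }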

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockSparseMatrix.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockSparseMatrix.html	2024-12-27 18:25:05.740852699 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockSparseMatrix.html	2024-12-27 18:25:05.748852754 +0000
@@ -1010,7 +1010,7 @@
      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix. The vector types can be block vectors or non-block vectors (only if the matrix has only one row or column, respectively), and need to define TrilinosWrappers::SparseMatrix::vmult.

      Definition at line 357 of file trilinos_tpetra_block_sparse_matrix.h.

      @@ -1042,7 +1042,7 @@
      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      Definition at line 372 of file trilinos_tpetra_block_sparse_matrix.h.

      @@ -2036,7 +2036,7 @@
      Adding Matrix-vector multiplication. Add $M*src$ on $dst$ with $M$ being this matrix.

      @@ -2610,7 +2610,7 @@
      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

      Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

      @@ -2718,7 +2718,7 @@
      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockVector.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockVector.html	2024-12-27 18:25:05.812853194 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockVector.html	2024-12-27 18:25:05.820853249 +0000
@@ -1562,7 +1562,7 @@
      $U = U * V$: scalar product.

      @@ -1588,7 +1588,7 @@
      Return the square of the $l_2$-norm.

      @@ -1640,7 +1640,7 @@
      Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

      @@ -1666,7 +1666,7 @@
      Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

      @@ -1692,7 +1692,7 @@
      Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

      @@ -1727,7 +1727,7 @@
      this->add(a, V);
      return_value = *this * W;

      The reason this function exists is that this operation involves less memory transfer than calling the two functions separately on deal.II's vector classes (Vector<Number> and LinearAlgebra::distributed::Vector<double>). This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      @@ -1974,7 +1974,7 @@
      $U(0-DIM)+=s$. Addition of s to all components. Note that s is a scalar and not a vector.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparseMatrix.html	2024-12-27 18:25:05.888853716 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparseMatrix.html	2024-12-27 18:25:05.892853743 +0000
@@ -1845,7 +1845,7 @@
      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      Obviously, the matrix needs to be quadratic for this operation.

      The implementation of this function is not as efficient as the one in the SparseMatrix class used in deal.II (i.e. the original one, not the Trilinos wrapper class) since Trilinos doesn't support this operation and needs a temporary vector.

      The vector has to be initialized with the same IndexSet the matrix was initialized with.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparsityPattern.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparsityPattern.html	2024-12-27 18:25:05.964854237 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparsityPattern.html	2024-12-27 18:25:05.964854237 +0000
@@ -477,7 +477,7 @@
      Generate a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally, too.

      It is possible to specify the number of column entries per row using the optional n_entries_per_row argument. However, this value does not need to be accurate or even given at all, since one usually does not have this kind of information before building the sparsity pattern (the usual case when the function DoFTools::make_sparsity_pattern() is called). The entries are allocated dynamically in a similar manner as for the deal.II DynamicSparsityPattern classes. However, a good estimate will reduce the setup time of the sparsity pattern.

      Definition at line 102 of file trilinos_tpetra_sparsity_pattern.cc.

      @@ -509,7 +509,7 @@
      Generate a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally, too.

      The vector n_entries_per_row specifies the number of entries in each row (information that is usually not available, though).

      Definition at line 113 of file trilinos_tpetra_sparsity_pattern.cc.

      @@ -799,7 +799,7 @@
      Initialize a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally.

      The number of column entries per row is specified as the maximum number of entries argument. This does not need to be an accurate number since the entries are allocated dynamically in a similar manner as for the deal.II DynamicSparsityPattern classes, but a good estimate will reduce the setup time of the sparsity pattern.

      Definition at line 234 of file trilinos_tpetra_sparsity_pattern.cc.

      @@ -831,7 +831,7 @@
      Initialize a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally.

      The vector n_entries_per_row specifies the number of entries in each row.

      Definition at line 248 of file trilinos_tpetra_sparsity_pattern.cc.

      @@ -1372,7 +1372,7 @@
      Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth an $n\times m$ matrix can have is $\max\{n-1,m-1\}$.

      Definition at line 865 of file trilinos_tpetra_sparsity_pattern.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1Vector.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1Vector.html	2024-12-27 18:25:06.028854677 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1Vector.html	2024-12-27 18:25:06.032854704 +0000
@@ -1570,7 +1570,7 @@

      The reason this function exists is that this operation involves less memory transfer than calling the two functions separately. This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

      The vectors need to have the same layout.

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html	2024-12-27 18:25:06.112855254 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html	2024-12-27 18:25:06.116855281 +0000
@@ -1357,7 +1357,7 @@
      $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

      @@ -1576,7 +1576,7 @@

      Calculate the scalar product between each block of this vector and V and store the result in a full matrix matrix. This function computes the result by forming $A_{ij}=U_i \cdot V_j$ where $U_i$ and $V_j$ indicate the $i$th block (not element!) of $U$ and the $j$th block of $V$, respectively. If symmetric is true, it is assumed that the inner product results in a square symmetric matrix and almost half of the scalar products can be avoided.

      Obviously, this function can only be used if all blocks of both vectors are of the same size.

      Note
      Internally, a single global reduction will be called to accumulate the scalar product between locally owned degrees of freedom.
      @@ -1606,7 +1606,7 @@

      Calculate the scalar product between each block of this vector and V using a metric tensor matrix. This function computes the result of $ \sum_{ij} A^{ij} U_i \cdot V_j$ where $U_i$ and $V_j$ indicate the $i$th block (not element) of $U$ and the $j$th block of $V$, respectively. If symmetric is true, it is assumed that $U_i \cdot V_j$ and $A^{ij}$ are symmetric matrices and almost half of the scalar products can be avoided.

      Obviously, this function can only be used if all blocks of both vectors are of the same size.

      Note
      Internally, a single global reduction will be called to accumulate the scalar product between locally owned degrees of freedom.
      @@ -1641,7 +1641,7 @@

      Set each block of this vector as follows: $V^i = s V^i + b \sum_{j} U_j A^{ji}$ where $V^i$ and $U_j$ indicate the $i$th block (not element) of $V$ and the $j$th block of $U$, respectively.

      Obviously, this function can only be used if all blocks of both vectors are of the same size.

      @@ -1845,7 +1845,7 @@
      Return the $l_2$ norm of the vector (i.e., the square root of the sum of the square of all entries among all processors).

      @@ -1865,7 +1865,7 @@
      Return the square of the $l_2$ norm of the vector.

      @@ -1915,7 +1915,7 @@
      this->add(a, V);
      return_value = *this * W;

      The reason this function exists is that this operation involves less memory transfer than calling the two functions separately. This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      @@ -2563,7 +2563,7 @@
      $U = U * V$: scalar product.

      @@ -2597,7 +2597,7 @@

      Performs a combined operation of a vector addition and a subsequent inner product, returning the value of the inner product. In other words, the result of this function is the same as if the user called

      this->add(a, V);
      return_value = *this * W;

      The reason this function exists is that this operation involves less memory transfer than calling the two functions separately on deal.II's vector classes (Vector<Number> and LinearAlgebra::distributed::Vector<double>). This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      @@ -2788,7 +2788,7 @@
      $U(0-DIM)+=s$. Addition of s to all components. Note that s is a scalar and not a vector.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html	2024-12-27 18:25:06.204855885 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html	2024-12-27 18:25:06.204855885 +0000
@@ -1034,7 +1034,7 @@

      Initialize the vector with local_size locally-owned and ghost_size ghost degrees of freedom.

      The optional argument comm_sm, which consists of processes on the same shared-memory domain, allows users to have read-only access to both locally-owned and ghost values of processes combined in the shared-memory communicator. See the general documentation of this class for more information about this argument.

      Note
      In the created underlying partitioner, the local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively. Setting the ghost_size variable to an appropriate value provides memory space for the ghost data in a vector's memory allocation and allows access to it via local_element(). However, the associated global indices must be handled externally in this case.
      @@ -1757,7 +1757,7 @@
      Return the $l_2$ norm of the vector (i.e., the square root of the sum of the square of all entries among all processors).

      @@ -1777,7 +1777,7 @@
      Return the square of the $l_2$ norm of the vector.

      @@ -1827,7 +1827,7 @@
      this->add(a, V);
      return_value = *this * W;

      The reason this function exists is that this operation involves less memory transfer than calling the two functions separately. This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      @@ -2453,7 +2453,7 @@
      $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html	2024-12-27 18:25:06.244856160 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html	2024-12-27 18:25:06.252856215 +0000
@@ -237,7 +237,7 @@
      std::function< void(Range &v, bool omit_zeroing_entries)> reinit_range_vector

      that store the knowledge how to initialize (resize + internal data structures) an arbitrary vector of the Range and Domain space.

      The primary purpose of this class is to provide syntactic sugar for complex matrix-vector operations and free the user from having to create, set up and handle intermediate storage locations by hand.

      As an example consider the operation $(A+k\,B)\,C$, where $A$, $B$ and $C$ denote (possibly different) matrices. In order to construct a LinearOperator op that stores the knowledge of this operation, one can write:

      #include <deal.II/lac/linear_operator_tools.h>
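
      For reference, the composition itself then reads (a sketch; A, B, C stand for already-assembled SparseMatrix<double> objects and k is a double):

      const auto op_a = linear_operator(A);
      const auto op_b = linear_operator(B);
      const auto op_c = linear_operator(C);

      // No intermediate matrix is formed; op only records how to apply the product.
      const auto op = (op_a + k * op_b) * op_c;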
      /usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html	2024-12-27 18:25:06.288856462 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html	2024-12-27 18:25:06.292856490 +0000
@@ -204,11 +204,11 @@

      In the most essential use of manifolds, manifold descriptions are used to create a "point between other points". For example, when a triangulation creates a new vertex on a cell, face, or edge, it determines the new vertex' coordinates through the following function call:

      ...
      Point<spacedim> new_vertex = manifold.get_new_point (points,weights);
      ...
      Here, points is a collection of points in spacedim dimensions, and weights is a collection of corresponding weights. The points in this context will then be the vertices of the cell, face, or edge, and the weights are typically one over the number of points when a new midpoint of the cell, face, or edge is needed. Derived classes then will implement the Manifold::get_new_point() function in a way that computes the location of this new point. In the simplest case, for example in the FlatManifold class, the function simply computes the arithmetic average (with given weights) of the given points. However, other classes do something differently; for example, the SphericalManifold class, which is used to describe domains that form (part of) the sphere, will ensure that, given the two vertices of an edge at the boundary, the new returned point will lie on the grand circle that connects the two points, rather than choosing a point that is half-way between the two points in ${\mathbb R}^d$.

      Note
      Unlike almost all other cases in the library, we here interpret the points to be in real space, not on the reference cell.

      Manifold::get_new_point() has a default implementation that can simplify this process somewhat: Internally, the function calls the Manifold::get_intermediate_point() to compute pair-wise intermediate points. Internally the Manifold::get_intermediate_point() calls the Manifold::project_to_manifold() function after computing the convex combination of the given points. This allows derived classes to only overload Manifold::project_to_manifold() for simple situations. This is often useful when describing manifolds that are embedded in higher dimensional space, e.g., the surface of a sphere. In those cases, the desired new point may be computed simply by the (weighted) average of the provided points, projected back out onto the sphere.

      Common use case: Computing tangent vectors

The second use of this class is in computing directions on domains and boundaries. For example, we may need to compute the normal vector to a face in order to impose the no-flow boundary condition $\mathbf u \cdot \mathbf n = 0$ (see VectorTools::compute_no_normal_flux_constraints() as an example). Similarly, we may need normal vectors in the computation of the normal component of the gradient of the numerical solution in order to compute the jump in the gradient of the solution in error estimators (see, for example, the KellyErrorEstimator class).

      To make this possible, the Manifold class provides a member function (to be implemented by derived classes) that computes a "vector tangent to the manifold at one point, in direction of another point" via the Manifold::get_tangent_vector() function. For example, in 2d, one would use this function with the two vertices of an edge at the boundary to compute a "tangential" vector along the edge, and then get the normal vector by rotation by 90 degrees. In 3d, one would compute the two vectors "tangential" to the two edges of a boundary face adjacent to a boundary vertex, and then take the cross product of these two to obtain a vector normal to the boundary.

      For reasons that are more difficult to understand, these direction vectors are normalized in a very specific way, rather than to have unit norm. See the documentation of Manifold::get_tangent_vector(), as well as below, for more information.
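A concrete sketch of the 2d case just described, under stated assumptions: manifold, x1, and x2 are taken as given, and which of the two 90-degree rotations points outward depends on the orientation of the edge.

// Tangent along the boundary edge from x1 to x2 (normalized as described below).
const Tensor<1, 2> tangent = manifold.get_tangent_vector(x1, x2);
Tensor<1, 2> normal;
normal[0] =  tangent[1];   // rotate (t_0, t_1) by 90 degrees to (t_1, -t_0)
normal[1] = -tangent[0];
normal /= normal.norm();   // rescale to unit length if a unit normal is needed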

      @@ -216,11 +216,11 @@

      A unified description

      The "real" way to understand what this class does is to see it in the framework of differential geometry. More specifically, differential geometry is fundamentally based on the assumption that two sufficiently close points are connected via a line of "shortest distance". This line is called a "geodesic", and it is selected from all other lines that connect the two points by the property that it is shortest if distances are measured in terms of the "metric" that describes a manifold. To give examples, recall that the geodesics of a flat manifold (implemented in the FlatManifold class) are simply the straight lines connecting two points, whereas for spherical manifolds (see the SphericalManifold class) geodesics between two points of same distance are the grand circles, and are in general curved lines when connecting two lines of different distance from the origin.

      In the following discussion, and for the purposes of implementing the current class, the concept of "metrics" that is so fundamental to differential geometry is no longer of great importance to us. Rather, everything can simply be described by postulating the existence of geodesics connecting points on a manifold.

Given geodesics, the operations discussed in the previous two sections can be described in a more formal way. In essence, they rely on the fact that we can assume that a geodesic is parameterized by a "time" like variable $t$ so that $\mathbf s(t)$ describes the curve and so that $\mathbf s(0)$ is the location of the first and $\mathbf s(1)$ the location of the second point. Furthermore, $\mathbf s(t)$ traces out the geodesic at constant speed, covering equal distance in equal time (as measured by the metric). Note that this parameterization uses time, not arc length, to denote progress along the geodesic.

In this picture, computing a mid-point between points $\mathbf x_1$ and $\mathbf x_2$, with weights $w_1$ and $w_2=1-w_1$, simply requires computing the point $\mathbf s(w_1)$. Computing a new point as a weighted average of more than two points can be done by considering pairwise geodesics, finding suitable points on the geodesic between the first two points, then on the geodesic between this new point and the third given point, etc.

Likewise, the "tangential" vector described above is simply the velocity vector, $\mathbf s'(t)$, evaluated at one of the end points of a geodesic (i.e., at $t=0$ or $t=1$). In the case of a flat manifold, the geodesic is simply the straight line connecting two points, and the velocity vector is just the connecting vector in that case. On the other hand, for two points on a spherical manifold, the geodesic is a great circle, and the velocity vector is tangent to the spherical surface.

Note that if we wanted to, we could use this to compute the length of the geodesic that connects two points $\mathbf x_1$ and $\mathbf x_2$ by computing $\int_0^1 \|\mathbf s'(t)\| dt$ along the geodesic that connects them, but this operation will not be of use to us in practice. One could also conceive computing the direction vector using the "new point" operation above, using the formula $\mathbf s'(0)=\lim_{w\rightarrow 0} \frac{\mathbf s(w)-\mathbf s(0)}{w}$, where all we need to do is compute the new point $\mathbf s(w)$ with weights $w$ and $1-w$ along the geodesic connecting $\mathbf x_1$ and $\mathbf x_2$. The default implementation of the function does this, by evaluating the quotient for a small but finite weight $w$. In practice, however, it is almost always possible to explicitly compute the direction vector, i.e., without the need to numerically approximate the limit process, and derived classes should do so.
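A minimal sketch of this finite-difference fallback, assuming a manifold object and two points x1, x2 are given (the value of the weight w below is illustrative, not the value the library uses):

const double w = 1e-8;
const std::vector<Point<spacedim>> points  = {x1, x2};
const std::vector<double>          weights = {1. - w, w};
// s(w) on the geodesic from x1 (weight 1-w) to x2 (weight w) ...
const Point<spacedim> s_w =
  manifold.get_new_point(make_array_view(points), make_array_view(weights));
// ... and the difference quotient approximating s'(0):
const Tensor<1, spacedim> direction = (s_w - x1) / w;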

      Definition at line 285 of file manifold.h.

      Member Typedef Documentation

      @@ -648,11 +648,11 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. The geodesic is the shortest line between these two points, where "shortest" is defined via a metric specific to a particular implementation of this class in a derived class. For example, in the case of a FlatManifold, the shortest line between two points is just the straight line, and in this case the tangent vector is just the difference $\mathbf d=\mathbf x_2-\mathbf x_1$. On the other hand, for a manifold that describes a surface embedded in a higher dimensional space (e.g., the surface of a sphere), the tangent vector is tangential to the surface, and consequently may point in a different direction than the straight line that connects the two points.

While tangent vectors are often normalized to unit length, the vectors returned by this function are normalized as described in the introduction of this class. Specifically, if $\mathbf s(t)$ traces out the geodesic between the two points where $\mathbf x_1 = \mathbf s(0)$ and $\mathbf x_2 = \mathbf s(1)$, then the returned vector must equal $\mathbf s'(0)$. In other words, the norm of the returned vector also encodes, in some sense, the length of the geodesic because a curve $\mathbf s(t)$ must move "faster" if the two points it connects between arguments $t=0$ and $t=1$ are farther apart.

The default implementation of this function approximates $\mathbf s'(0) \approx \frac{\mathbf s(\epsilon)-\mathbf x_1}{\epsilon}$ for a small value of $\epsilon$, and the evaluation of $\mathbf s(\epsilon)$ is done by calling get_new_point(). If possible, derived classes should override this function by an implementation of the exact derivative.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html 2024-12-27 18:25:06.356856929 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html 2024-12-27 18:25:06.360856957 +0000 @@ -245,7 +245,7 @@ class Mapping< dim, spacedim >

      Abstract base class for mapping classes.

      This class declares the interface for the functionality to describe mappings from the reference (unit) cell to a cell in real space, as well as for filling the information necessary to use the FEValues, FEFaceValues, and FESubfaceValues classes. Concrete implementations of these interfaces are provided in derived classes.
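In user code, a mapping is usually not called directly but handed to an FEValues object, which invokes the interface described here when it is reinitialized on a cell. A hedged usage sketch follows; fe, quadrature, and cell are assumed to exist, while MappingQ, FEValues, and the update flags are the usual deal.II names:

MappingQ<dim> mapping(2);  // a polynomial mapping of degree 2
FEValues<dim> fe_values(mapping, fe, quadrature,
                        update_values | update_gradients |
                        update_quadrature_points | update_JxW_values);
fe_values.reinit(cell);                                // calls fill_fe_values() internally
const Point<dim> x_q = fe_values.quadrature_point(0);  // mapped point F_K(x-hat_q)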

      Mathematics of the mapping

The mapping is a transformation $\mathbf x = \mathbf F_K(\hat{\mathbf x})$ which maps points $\hat{\mathbf x}$ in the reference cell $[0,1]^\text{dim}$ to points $\mathbf x$ in the actual grid cell $K\subset{\mathbb R}^\text{spacedim}$. Many of the applications of such mappings require the Jacobian of this mapping, $J(\hat{\mathbf x}) = \hat\nabla {\mathbf F}_K(\hat{\mathbf x})$. For instance, if dim=spacedim=2, we have

\[
J(\hat{\mathbf x}) = \left(\begin{matrix}
\frac{\partial x_1}{\partial \hat x_1} & \frac{\partial x_1}{\partial \hat x_2} \\
\frac{\partial x_2}{\partial \hat x_1} & \frac{\partial x_2}{\partial \hat x_2}
\end{matrix}\right)
\]
@@ -1003,7 +1003,7 @@

      Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

Conceptually, this function represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\hat{\mathbf x}\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

      • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
      • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
@@ -1350,18 +1350,18 @@

        Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

        • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

\[
\mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x})
J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
\]

          Jacobians of spacedim-vector valued differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

\[
\mathbf T(\mathbf x) = \hat{\mathbf u}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

        @@ -1416,35 +1416,35 @@

        Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

• mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf u}$ so that

\[
\mathbf T(\mathbf x) = J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

• mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf u}$ so that

\[
\mathbf T(\mathbf x) = J(\hat{\mathbf x})^{-T} \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

• mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

\[
\mathbf T(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

        @@ -1501,21 +1501,21 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

\[
\mathbf T_{ijk}(\mathbf x) = \hat{\mathbf T}_{iJK}(\hat{\mathbf x}) J_{jJ}^{\dagger} J_{kK}^{\dagger},
\]

where

\[
J^{\dagger} = J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
\]

Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

        In the case when dim=spacedim the previous formula reduces to

\[J^{\dagger} = J^{-1}\]

        Parameters
x1: The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
      @@ -1565,40 +1565,40 @@
Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat u}_I$, with $\mathbf u_i$ a vector field.

      The mapping kinds currently implemented by derived classes are:

• mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf u}_I$ so that

\[
\mathbf T_{ijk}(\mathbf x) = J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x}) J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
\]

• mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf u}_I$ so that

\[
\mathbf T_{ijk}(\mathbf x) = J_{iI}(\hat{\mathbf x})^{-1} \hat{\mathbf T}_{IJK}(\hat{\mathbf x}) J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
\]

• mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

\[
\mathbf T_{ijk}(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x}) J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
\]

      /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingC1.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingC1.html 2024-12-27 18:25:06.436857478 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingC1.html 2024-12-27 18:25:06.440857506 +0000 @@ -874,18 +874,18 @@

      Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

      • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

\[
\mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x})
J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
\]

        Jacobians of spacedim-vector valued differentiable functions are transformed this way.

        In the case when dim=spacedim the previous formula reduces to

\[
\mathbf T(\mathbf x) = \hat{\mathbf u}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

      @@ -940,35 +940,35 @@

      Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

• mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf u}$ so that

\[
\mathbf T(\mathbf x) = J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

• mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf u}$ so that

\[
\mathbf T(\mathbf x) = J(\hat{\mathbf x})^{-T} \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

• mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

\[
\mathbf T(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

      @@ -1025,21 +1025,21 @@

      The mapping kinds currently implemented by derived classes are:

      • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

\[
\mathbf T_{ijk}(\mathbf x) = \hat{\mathbf T}_{iJK}(\hat{\mathbf x}) J_{jJ}^{\dagger} J_{kK}^{\dagger},
\]

where

\[
J^{\dagger} = J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
\]

Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

      In the case when dim=spacedim the previous formula reduces to

\[J^{\dagger} = J^{-1}\]

      Parameters
      @@ -1089,40 +1089,40 @@
Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat u}_I$, with $\mathbf u_i$ a vector field.

      The mapping kinds currently implemented by derived classes are:

• mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf u}_I$ so that

\[
\mathbf T_{ijk}(\mathbf x) = J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x}) J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
\]

• mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf u}_I$ so that

\[
\mathbf T_{ijk}(\mathbf x) = J_{iI}(\hat{\mathbf x})^{-1} \hat{\mathbf T}_{IJK}(\hat{\mathbf x}) J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
\]

• mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

\[
\mathbf T_{ijk}(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x}) J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
\]

      @@ -1459,7 +1459,7 @@

      Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

Conceptually, this function represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\hat{\mathbf x}\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

      • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
      • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
@@ -1521,7 +1521,7 @@

      Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

Conceptually, this function represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\hat{\mathbf x}\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

      • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
      • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingCartesian.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingCartesian.html 2024-12-27 18:25:06.504857945 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingCartesian.html 2024-12-27 18:25:06.508857973 +0000 @@ -231,9 +231,9 @@

        Detailed Description

        template<int dim, int spacedim = dim>
        class MappingCartesian< dim, spacedim >

        A class providing a mapping from the reference cell to cells that are axiparallel, i.e., that have the shape of rectangles (in 2d) or boxes (in 3d) with edges parallel to the coordinate directions. The class therefore provides functionality that is equivalent to what, for example, MappingQ would provide for such cells. However, knowledge of the shape of cells allows this class to be substantially more efficient.

Specifically, the mapping is meant for cells for which the mapping from the reference to the real cell is a scaling along the coordinate directions: The transformation from reference coordinates $\hat {\mathbf x}$ to real coordinates $\mathbf x$ on each cell is of the form

\begin{align*}
{\mathbf x}(\hat {\mathbf x})
=
\begin{pmatrix}
h_x & 0 \\
0 & h_y
\end{pmatrix}
\hat{\mathbf x}
+ {\mathbf v}_0
\end{align*}

in 2d, and

\begin{align*}
{\mathbf x}(\hat {\mathbf x})
=
\begin{pmatrix}
h_x & 0 & 0 \\
0 & h_y & 0 \\
0 & 0 & h_z
\end{pmatrix}
\hat{\mathbf x}
+ {\mathbf v}_0
\end{align*}

in 3d, where ${\mathbf v}_0$ is the bottom left vertex and $h_x,h_y,h_z$ are the extents of the cell along the axes.

        The class is intended for efficiency, and it does not do a whole lot of error checking. If you apply this mapping to a cell that does not conform to the requirements above, you will get strange results.
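A hedged, self-contained illustration of the 2d formula above in plain C++; the function and parameter names are made up for this sketch and do not appear in the library:

#include <array>

// x = diag(h_x, h_y) * x_hat + v_0: scale each reference coordinate by the
// cell extent along that axis, then translate by the bottom-left vertex v_0.
std::array<double, 2> map_cartesian(const std::array<double, 2> &x_hat,
                                    const std::array<double, 2> &v_0,
                                    const double h_x, const double h_y)
{
  return {v_0[0] + h_x * x_hat[0], v_0[1] + h_y * x_hat[1]};
}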

        Definition at line 78 of file mapping_cartesian.h.

        @@ -656,18 +656,18 @@

        Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

        • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

\[
\mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x})
J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
\]

          Jacobians of spacedim-vector valued differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

\[
\mathbf T(\mathbf x) = \hat{\mathbf u}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

        @@ -724,35 +724,35 @@

        Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

• mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf u}$ so that

\[
\mathbf T(\mathbf x) = J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

• mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf u}$ so that

\[
\mathbf T(\mathbf x) = J(\hat{\mathbf x})^{-T} \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

• mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

\[
\mathbf T(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

        @@ -811,21 +811,21 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

\[
\mathbf T_{ijk}(\mathbf x) = \hat{\mathbf T}_{iJK}(\hat{\mathbf x}) J_{jJ}^{\dagger} J_{kK}^{\dagger},
\]

where

\[
J^{\dagger} = J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
\]

Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

        In the case when dim=spacedim the previous formula reduces to

\[J^{\dagger} = J^{-1}\]

        Parameters
        @@ -877,40 +877,40 @@
Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat u}_I$, with $\mathbf u_i$ a vector field.

        The mapping kinds currently implemented by derived classes are:

• mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf u}_I$ so that

\[
\mathbf T_{ijk}(\mathbf x) = J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x}) J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
\]

• mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf u}_I$ so that

\[
\mathbf T_{ijk}(\mathbf x) = J_{iI}(\hat{\mathbf x})^{-1} \hat{\mathbf T}_{IJK}(\hat{\mathbf x}) J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
\]

• mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

\[
\mathbf T_{ijk}(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x}) J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
\]

        @@ -1173,7 +1173,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

Conceptually, this function represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\hat{\mathbf x}\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE.html 2024-12-27 18:25:06.572858412 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE.html 2024-12-27 18:25:06.576858440 +0000 @@ -715,18 +715,18 @@

        Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

        • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

\[
\mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x})
J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
\]

          Jacobians of spacedim-vector valued differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

\[
\mathbf T(\mathbf x) = \hat{\mathbf u}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

        @@ -783,35 +783,35 @@

        Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

• mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf u}$ so that

\[
\mathbf T(\mathbf x) = J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

• mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf u}$ so that

\[
\mathbf T(\mathbf x) = J(\hat{\mathbf x})^{-T} \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

• mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

\[
\mathbf T(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

        @@ -870,21 +870,21 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

\[
\mathbf T_{ijk}(\mathbf x) = \hat{\mathbf T}_{iJK}(\hat{\mathbf x}) J_{jJ}^{\dagger} J_{kK}^{\dagger},
\]

where

\[
J^{\dagger} = J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
\]

Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

        In the case when dim=spacedim the previous formula reduces to

\[J^{\dagger} = J^{-1}\]

        Parameters
        @@ -936,40 +936,40 @@
Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat u}_I$, with $\mathbf u_i$ a vector field.

        The mapping kinds currently implemented by derived classes are:

• mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf u}_I$ so that

\[
\mathbf T_{ijk}(\mathbf x) = J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x}) J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
\]

• mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf u}_I$ so that

\[
\mathbf T_{ijk}(\mathbf x) = J_{iI}(\hat{\mathbf x})^{-1} \hat{\mathbf T}_{IJK}(\hat{\mathbf x}) J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
\]

• mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

\[
\mathbf T_{ijk}(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x}) J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
\]

        @@ -1190,7 +1190,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

Conceptually, this function represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\hat{\mathbf x}\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
        • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField.html 2024-12-27 18:25:06.648858934 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField.html 2024-12-27 18:25:06.652858962 +0000 @@ -785,18 +785,18 @@

          Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

          • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

\[
\mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x})
J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
\]

            Jacobians of spacedim-vector valued differentiable functions are transformed this way.

            In the case when dim=spacedim the previous formula reduces to

\[
\mathbf T(\mathbf x) = \hat{\mathbf u}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

          @@ -853,35 +853,35 @@

        Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

• mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf u}$ so that

\[
\mathbf T(\mathbf x) = J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

• mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf u}$ so that

\[
\mathbf T(\mathbf x) = J(\hat{\mathbf x})^{-T} \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

• mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

\[
\mathbf T(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

        @@ -940,21 +940,21 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

\[
\mathbf T_{ijk}(\mathbf x) = \hat{\mathbf T}_{iJK}(\hat{\mathbf x}) J_{jJ}^{\dagger} J_{kK}^{\dagger},
\]

where

\[
J^{\dagger} = J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
\]

Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

        In the case when dim=spacedim the previous formula reduces to

\[J^{\dagger} = J^{-1}\]

        Parameters
        @@ -1006,40 +1006,40 @@
Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat u}_I$, with $\mathbf u_i$ a vector field.

        The mapping kinds currently implemented by derived classes are:

• mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf u}_I$ so that

\[
\mathbf T_{ijk}(\mathbf x) = J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x}) J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
\]

• mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf u}_I$ so that

\[
\mathbf T_{ijk}(\mathbf x) = J_{iI}(\hat{\mathbf x})^{-1} \hat{\mathbf T}_{IJK}(\hat{\mathbf x}) J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
\]

• mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

\[
\mathbf T_{ijk}(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x}) J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
\]

        @@ -1304,7 +1304,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

Conceptually, this function represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\hat{\mathbf x}\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
        • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
        • /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField_1_1InternalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField_1_1InternalData.html 2024-12-27 18:25:06.688859209 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField_1_1InternalData.html 2024-12-27 18:25:06.692859236 +0000 @@ -735,7 +735,7 @@
        Tensors of contravariant transformation at each of the quadrature points. The contravariant matrix is the Jacobian of the transformation, i.e. $J_{ij} = dx_i/d\hat x_j$.

        Computed on each cell.

        Definition at line 467 of file mapping_fe_field.h.
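        This cached Jacobian is exposed to user code through FEValues::jacobian() when update_jacobians is set; a short fragment, reusing the illustrative mapping, fe, quadrature, and cell objects from the earlier sketch:

        FEValues<dim> fe_jac(mapping, fe, quadrature, update_jacobians);
        fe_jac.reinit(cell);
        // J_{ij} = dx_i/dx̂_j at the first quadrature point:
        const DerivativeForm<1, dim, dim> J = fe_jac.jacobian(0);
        (void)J;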

        /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE_1_1InternalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE_1_1InternalData.html 2024-12-27 18:25:06.732859511 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE_1_1InternalData.html 2024-12-27 18:25:06.736859538 +0000 @@ -757,7 +757,7 @@
        Tensors of contravariant transformation at each of the quadrature points. The contravariant matrix is the Jacobian of the transformation, i.e. $J_{ij} = dx_i/d\hat x_j$.

        Computed on each cell.

        Definition at line 362 of file mapping_fe.h.

        /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html 2024-12-27 18:25:06.792859923 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html 2024-12-27 18:25:06.792859923 +0000 @@ -633,18 +633,18 @@

        Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

        • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          \[
          \mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x})
          J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
          \]

          Jacobians of spacedim-vector valued differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

          \[
          \mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
          \]

        @@ -701,35 +701,35 @@

        Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

        • mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf u}$ so that

          \[
          \mathbf T(\mathbf x) =
          J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
          \]

        • mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf u}$ so that

          \[
          \mathbf T(\mathbf x) =
          J(\hat{\mathbf x})^{-T} \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
          \]

        • mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

          \[
          \mathbf T(\mathbf x) =
          \frac{1}{\text{det}\;J(\hat{\mathbf x})}
          J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
          \]
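        To make the covariant-gradient rule concrete, here is a small self-contained sketch that applies $\mathbf T = J^{-T} \hat{\mathbf T} J^{-1}$ by hand with deal.II's tensor algebra; the Jacobian value is made up for illustration:

        #include <deal.II/base/tensor.h>

        using namespace dealii;

        int main()
        {
          constexpr int dim = 2;

          // A made-up Jacobian J and reference-cell tensor T̂:
          Tensor<2, dim> J;
          J[0][0] = 2.0;  J[0][1] = 0.5;
          J[1][0] = 0.0;  J[1][1] = 1.5;

          Tensor<2, dim> T_hat;
          T_hat[0][0] = 1.0;
          T_hat[1][1] = -1.0;

          // Covariant-gradient push-forward: T = J^{-T} T̂ J^{-1}.
          const Tensor<2, dim> J_inv = invert(J);
          const Tensor<2, dim> T     = transpose(J_inv) * T_hat * J_inv;
          (void)T;
        }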

        @@ -788,21 +788,21 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          \[
          \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf T}_{iJK}(\hat{\mathbf x})
          J_{jJ}^{\dagger} J_{kK}^{\dagger},
          \]

          where

          \[
          J^{\dagger} = J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
          \]

        Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

        In the case when dim=spacedim the previous formula reduces to

        \[ J^{\dagger} = J^{-1}. \]

        Parameters
        @@ -854,40 +854,40 @@
        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\hat{\mathbf T}_{IJK} = \hat D^2_{JK} \hat{\mathbf u}_I$, with $\mathbf u_i$ a vector field.

        The mapping kinds currently implemented by derived classes are:

        • mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf u}_I$ so that

          \[
          \mathbf T_{ijk}(\mathbf x) =
          J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
          J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
          \]

        • mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf u}_I$ so that

          \[
          \mathbf T_{ijk}(\mathbf x) =
          J_{iI}(\hat{\mathbf x})^{-1} \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
          J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
          \]

        • mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf u}_I(\hat{\mathbf x})$ so that

          \[
          \mathbf T_{ijk}(\mathbf x) =
          \frac{1}{\text{det}\;J(\hat{\mathbf x})}
          J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
          J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
          \]

        @@ -1108,7 +1108,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

        Conceptually, this function represents the application of the mapping $\mathbf x = \mathbf F_K(\hat{\mathbf x})$ from reference coordinates $\hat{\mathbf x} \in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
        • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
        • /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold_1_1InternalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold_1_1InternalData.html 2024-12-27 18:25:06.832860198 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold_1_1InternalData.html 2024-12-27 18:25:06.836860226 +0000 @@ -526,7 +526,7 @@
        Tensors of contravariant transformation at each of the quadrature points. The contravariant matrix is the Jacobian of the transformation, i.e. $J_{ij} = dx_i/d\hat x_j$.

        Computed on each cell.

        Definition at line 298 of file mapping_manifold.h.

        /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html 2024-12-27 18:25:06.912860747 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html 2024-12-27 18:25:06.920860802 +0000 @@ -266,7 +266,7 @@

        Detailed Description

        template<int dim, int spacedim = dim>
        class MappingQ< dim, spacedim >

        This class implements the functionality for polynomial mappings $Q_p$ of polynomial degree $p$ that will be used on all cells of the mesh. In order to get a genuine higher-order mapping for all cells, it is important to provide information about how interior edges and faces of the mesh should be curved. This is typically done by associating a Manifold with interior cells and edges. A simple example of this is discussed in the "Results" section of step-6; a full discussion of manifolds is provided in step-53. If manifolds are only attached to the boundaries of a domain, the current class with higher polynomial degrees will provide the same information as a mere MappingQ1 object. If you are working on meshes that describe a (curved) manifold embedded in higher space dimensions, i.e., if dim!=spacedim, then every cell is at the boundary of the domain and you will likely already have attached a manifold object to all cells, which can then also be used by the mapping classes for higher order mappings.

        Behavior along curved boundaries and with different manifolds

        For a number of applications, one only knows a manifold description of a surface but not the interior of the computational domain. In such a case, a FlatManifold object will be assigned to the interior entities that describes a usual planar coordinate system where the additional points for the higher order mapping are placed exactly according to a bi-/trilinear mapping. When combined with a non-flat manifold on the boundary, for example a circle bulging into the interior of a square cell, the two manifold descriptions are in general incompatible. For example, a FlatManifold defined solely through the cell's vertices would put an interior point located at some small distance epsilon away from the boundary along a straight line and thus in general outside the concave part of a circle. If the polynomial degree of MappingQ is sufficiently high, the transformation from the reference cell to such a cell would in general contain inverted regions close to the boundary.

        In order to avoid this situation, this class applies an algorithm for making this transition smooth using a so-called transfinite interpolation that is essentially a linear blend between the descriptions along the surrounding entities. In the algorithm that computes additional points, the compute_mapping_support_points() method, all the entities of the cells are passed through hierarchically, starting from the lines to the quads and finally hexes. Points on objects higher up in the hierarchy are obtained from the manifold associated with that object, taking into account all the points previously computed by the manifolds associated with the lower-dimensional objects, not just the vertices. If only a line is assigned a curved boundary but the adjacent quad is on a flat manifold, the flat manifold on the quad will take the points on the deformed line into account when interpolating the position of the additional points inside the quad and thus always result in a well-defined transformation.
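        The following self-contained sketch illustrates the setup described above; the geometry (a disk, for which GridGenerator::hyper_ball() attaches a SphericalManifold to the boundary) and the cubic mapping degree are illustrative assumptions:

        #include <deal.II/fe/mapping_q.h>
        #include <deal.II/grid/grid_generator.h>
        #include <deal.II/grid/manifold_lib.h>
        #include <deal.II/grid/tria.h>

        using namespace dealii;

        int main()
        {
          constexpr int dim = 2;

          Triangulation<dim> triangulation;
          // hyper_ball() attaches a SphericalManifold to the boundary, so a
          // higher-order mapping can curve the boundary cells; the interior
          // uses the flat/transfinite blending described above.
          GridGenerator::hyper_ball(triangulation);
          triangulation.refine_global(2);

          const MappingQ<dim> mapping(3);
          (void)mapping; // hand this to FEValues, VectorTools, etc.
        }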

        @@ -800,18 +800,18 @@

        Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

        • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          \[
          \mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x})
          J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
          \]

          Jacobians of spacedim-vector valued differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

          \[
          \mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
          \]

        @@ -868,35 +868,35 @@

        Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

        • mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf u}$ so that

          \[
          \mathbf T(\mathbf x) =
          J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
          \]

        • mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf u}$ so that

          \[
          \mathbf T(\mathbf x) =
          J(\hat{\mathbf x})^{-T} \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
          \]

        • mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

          \[
          \mathbf T(\mathbf x) =
          \frac{1}{\text{det}\;J(\hat{\mathbf x})}
          J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
          \]

        @@ -955,21 +955,21 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          \[
          \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf T}_{iJK}(\hat{\mathbf x})
          J_{jJ}^{\dagger} J_{kK}^{\dagger},
          \]

          where

          \[
          J^{\dagger} = J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
          \]

        Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

        In the case when dim=spacedim the previous formula reduces to

        \[ J^{\dagger} = J^{-1}. \]

        Parameters
        @@ -1021,40 +1021,40 @@
        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\hat{\mathbf T}_{IJK} = \hat D^2_{JK} \hat{\mathbf u}_I$, with $\mathbf u_i$ a vector field.

        The mapping kinds currently implemented by derived classes are:

        • mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf u}_I$ so that

          \[
          \mathbf T_{ijk}(\mathbf x) =
          J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
          J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
          \]

        • mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf u}_I$ so that

          \[
          \mathbf T_{ijk}(\mathbf x) =
          J_{iI}(\hat{\mathbf x})^{-1} \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
          J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
          \]

        • mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf u}_I(\hat{\mathbf x})$ so that

          \[
          \mathbf T_{ijk}(\mathbf x) =
          \frac{1}{\text{det}\;J(\hat{\mathbf x})}
          J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
          J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
          \]

        @@ -1365,7 +1365,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

        Conceptually, this function represents the application of the mapping $\mathbf x = \mathbf F_K(\hat{\mathbf x})$ from reference coordinates $\hat{\mathbf x} \in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
        • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
        • /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html 2024-12-27 18:25:06.984861242 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html 2024-12-27 18:25:06.992861297 +0000 @@ -729,18 +729,18 @@

          Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

          • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

            \[
            \mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x})
            J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
            \]

            Jacobians of spacedim-vector valued differentiable functions are transformed this way.

            In the case when dim=spacedim the previous formula reduces to

            \[
            \mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
            \]

          @@ -795,35 +795,35 @@

        Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

        • mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf u}$ so that

          \[
          \mathbf T(\mathbf x) =
          J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
          \]

        • mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf u}$ so that

          \[
          \mathbf T(\mathbf x) =
          J(\hat{\mathbf x})^{-T} \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
          \]

        • mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

          \[
          \mathbf T(\mathbf x) =
          \frac{1}{\text{det}\;J(\hat{\mathbf x})}
          J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
          \]

        @@ -880,21 +880,21 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          \[
          \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf T}_{iJK}(\hat{\mathbf x})
          J_{jJ}^{\dagger} J_{kK}^{\dagger},
          \]

          where

          \[
          J^{\dagger} = J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
          \]

        Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

        In the case when dim=spacedim the previous formula reduces to

        \[ J^{\dagger} = J^{-1}. \]

        Parameters
        @@ -944,40 +944,40 @@
        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\hat{\mathbf T}_{IJK} = \hat D^2_{JK} \hat{\mathbf u}_I$, with $\mathbf u_i$ a vector field.

        The mapping kinds currently implemented by derived classes are:

        • mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf u}_I$ so that

          \[
          \mathbf T_{ijk}(\mathbf x) =
          J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
          J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
          \]

        • mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf u}_I$ so that

          \[
          \mathbf T_{ijk}(\mathbf x) =
          J_{iI}(\hat{\mathbf x})^{-1} \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
          J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
          \]

        • mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf u}_I(\hat{\mathbf x})$ so that

          \[
          \mathbf T_{ijk}(\mathbf x) =
          \frac{1}{\text{det}\;J(\hat{\mathbf x})}
          J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
          J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
          \]

        @@ -1314,7 +1314,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

        Conceptually, this function represents the application of the mapping $\mathbf x = \mathbf F_K(\hat{\mathbf x})$ from reference coordinates $\hat{\mathbf x} \in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
        • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
        • @@ -1376,7 +1376,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

        Conceptually, this function represents the application of the mapping $\mathbf x = \mathbf F_K(\hat{\mathbf x})$ from reference coordinates $\hat{\mathbf x} \in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
        • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
        • /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html 2024-12-27 18:25:07.064861791 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html 2024-12-27 18:25:07.068861819 +0000 @@ -866,18 +866,18 @@

          Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

          • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

            \[
            \mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x})
            J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
            \]

            Jacobians of spacedim-vector valued differentiable functions are transformed this way.

            In the case when dim=spacedim the previous formula reduces to

            \[
            \mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
            \]

          @@ -932,35 +932,35 @@

        Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

        • mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf u}$ so that

          \[
          \mathbf T(\mathbf x) =
          J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
          \]

        • mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf u}$ so that

          \[
          \mathbf T(\mathbf x) =
          J(\hat{\mathbf x})^{-T} \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
          \]

        • mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

          \[
          \mathbf T(\mathbf x) =
          \frac{1}{\text{det}\;J(\hat{\mathbf x})}
          J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
          \]

        @@ -1017,21 +1017,21 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          \[
          \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf T}_{iJK}(\hat{\mathbf x})
          J_{jJ}^{\dagger} J_{kK}^{\dagger},
          \]

          where

          \[
          J^{\dagger} = J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
          \]

        Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

        In the case when dim=spacedim the previous formula reduces to

        \[ J^{\dagger} = J^{-1}. \]

        Parameters
        @@ -1081,40 +1081,40 @@
        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\hat{\mathbf T}_{IJK} = \hat D^2_{JK} \hat{\mathbf u}_I$, with $\mathbf u_i$ a vector field.

        The mapping kinds currently implemented by derived classes are:

        • mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf u}_I$ so that

          \[
          \mathbf T_{ijk}(\mathbf x) =
          J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
          J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
          \]

        • mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf u}_I$ so that

          \[
          \mathbf T_{ijk}(\mathbf x) =
          J_{iI}(\hat{\mathbf x})^{-1} \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
          J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
          \]

        • mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf u}_I(\hat{\mathbf x})$ so that

          \[
          \mathbf T_{ijk}(\mathbf x) =
          \frac{1}{\text{det}\;J(\hat{\mathbf x})}
          J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
          J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
          \]

        @@ -1451,7 +1451,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

        Conceptually, this function represents the application of the mapping $\mathbf x = \mathbf F_K(\hat{\mathbf x})$ from reference coordinates $\hat{\mathbf x} \in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
        • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
        /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html 2024-12-27 18:25:07.160862450 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html 2024-12-27 18:25:07.156862423 +0000 @@ -516,7 +516,7 @@ const std::function< std::vector< Point< spacedim > >(const typename Triangulation< dim, spacedim >::cell_iterator &)> & compute_points_on_cell

          Initialize the data cache by letting the function given as an argument provide the mapping support points for all cells (on all levels) of the given triangulation. The function must return a vector of Point<spacedim> whose length is the same as the size of the polynomial space, $(p+1)^\text{dim}$, where $p$ is the polynomial degree of the mapping, and it must be in the order the mapping or FE_Q sort their points, i.e., all $2^\text{dim}$ vertex points first, then the points on the lines, quads, and hexes according to the usual hierarchical numbering. No attempt is made to validate these points internally, except for the number of given points.

          Note
          If multiple threads are enabled, this function will run in parallel, invoking the function passed in several times. Thus, in case MultithreadInfo::n_threads()>1, the user code must make sure that the function, typically a lambda, does not write into data shared with other threads.
          The cache is invalidated upon the signal Triangulation::Signals::any_change of the underlying triangulation.
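          As a minimal sketch of the simplest use of this class (an assumption based on the deal.II 9.x interface, where initialize() can also take an existing mapping instead of the point-callback documented above):

          #include <deal.II/fe/mapping_q.h>
          #include <deal.II/fe/mapping_q_cache.h>
          #include <deal.II/grid/grid_generator.h>
          #include <deal.II/grid/tria.h>

          using namespace dealii;

          int main()
          {
            constexpr int dim = 2;

            Triangulation<dim> triangulation;
            GridGenerator::hyper_cube(triangulation);
            triangulation.refine_global(1);

            const MappingQ<dim> mapping(1);
            MappingQCache<dim>  cache(1);

            // Let an existing mapping provide the support points; the
            // std::function overload documented above could be used instead
            // to place the points manually, in FE_Q hierarchical order.
            cache.initialize(mapping, triangulation);
          }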
          @@ -1087,18 +1087,18 @@

          Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

          • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

            \[
            \mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x})
            J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
            \]

            Jacobians of spacedim-vector valued differentiable functions are transformed this way.

            In the case when dim=spacedim the previous formula reduces to

            \[
            \mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
            \]

          @@ -1153,35 +1153,35 @@

        Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

        • mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf u}$ so that

          \[
          \mathbf T(\mathbf x) =
          J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
          \]

        • mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf u}$ so that

          \[
          \mathbf T(\mathbf x) =
          J(\hat{\mathbf x})^{-T} \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
          \]

        • mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

          \[
          \mathbf T(\mathbf x) =
          \frac{1}{\text{det}\;J(\hat{\mathbf x})}
          J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
          \]

        @@ -1238,21 +1238,21 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          \[
          \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf T}_{iJK}(\hat{\mathbf x})
          J_{jJ}^{\dagger} J_{kK}^{\dagger},
          \]

          where

          \[
          J^{\dagger} = J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
          \]

        Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

        In the case when dim=spacedim the previous formula reduces to

        \[ J^{\dagger} = J^{-1}. \]

        Parameters
        @@ -1302,40 +1302,40 @@
        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\hat{\mathbf T}_{IJK} = \hat D^2_{JK} \hat{\mathbf u}_I$, with $\mathbf u_i$ a vector field.

        The mapping kinds currently implemented by derived classes are:

        • mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf u}_I$ so that

          \[
          \mathbf T_{ijk}(\mathbf x) =
          J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
          J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
          \]

        • mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf u}_I$ so that

          \[
          \mathbf T_{ijk}(\mathbf x) =
          J_{iI}(\hat{\mathbf x})^{-1} \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
          J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
          \]

        • mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf u}_I(\hat{\mathbf x})$ so that

          \[
          \mathbf T_{ijk}(\mathbf x) =
          \frac{1}{\text{det}\;J(\hat{\mathbf x})}
          J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
          J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
          \]

        @@ -1672,7 +1672,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

        Conceptually, this function represents the application of the mapping $\mathbf x = \mathbf F_K(\hat{\mathbf x})$ from reference coordinates $\hat{\mathbf x} \in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
        • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
        • @@ -1734,7 +1734,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

        Conceptually, this function represents the application of the mapping $\mathbf x = \mathbf F_K(\hat{\mathbf x})$ from reference coordinates $\hat{\mathbf x} \in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
        • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$ or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
        • /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html 2024-12-27 18:25:07.228862917 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html 2024-12-27 18:25:07.236862972 +0000 @@ -897,18 +897,18 @@

          Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

          • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

            \[
            \mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x})
            J(\hat{\mathbf x}) (J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
            \]

            Jacobians of spacedim-vector valued differentiable functions are transformed this way.

            In the case when dim=spacedim the previous formula reduces to

            \[
            \mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
            \]

          @@ -963,35 +963,35 @@

        Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

        • mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf u}$ so that

          \[
          \mathbf T(\mathbf x) =
          J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
          \]

        • mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf u}$ so that

          \[
          \mathbf T(\mathbf x) =
          J(\hat{\mathbf x})^{-T} \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
          \]

        • mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

          \[
          \mathbf T(\mathbf x) =
          \frac{1}{\text{det}\;J(\hat{\mathbf x})}
          J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
          \]

        @@ -1048,21 +1048,21 @@

        The mapping kinds currently implemented by derived classes are:

        • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          \[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf T}_{iJK}(\hat{\mathbf x})
 J_{jJ}^{\dagger} J_{kK}^{\dagger},
          \]

          where

          \[
 J^{\dagger} = J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T}
 J(\hat{\mathbf x}))^{-1}.
          \]

        Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

        In the case when dim=spacedim the previous formula reduces to

        \[ J^{\dagger} = J^{-1} \]

        Parameters
        @@ -1112,40 +1112,40 @@
        Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat u}_I$, with $\mathbf u_i$ a vector field.

        The mapping kinds currently implemented by derived classes are:

        • mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf u}_I$ so that

          \[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
 J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
          \]

        • mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf u}_I$ so that

          \[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf x})^{-1} \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
 J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
          \]

        • mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

          \[
 \mathbf T_{ijk}(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
 J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
          \]

        @@ -1482,7 +1482,7 @@

        Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

        Conceptually, this function represents the application of the mapping $\mathbf x=\mathbf F_K(\hat {\mathbf x})$ from reference coordinates $\hat{\mathbf x}\in [0,1]^d$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

        • Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
        • Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). An example of the information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$, or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.
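
        Since this hunk documents MappingQEulerian, a brief hedged usage sketch may help; euler_dof_handler, displacement, fe, and quadrature are assumed names, not taken from this log:

          #include <deal.II/fe/fe_values.h>
          #include <deal.II/fe/mapping_q_eulerian.h>

          using namespace dealii;

          // A degree-2 Eulerian mapping: FEValues then sees the mesh displaced
          // by the field "displacement" stored on "euler_dof_handler".
          MappingQEulerian<dim> mapping(2, euler_dof_handler, displacement);
          FEValues<dim>         fe_values(mapping, fe, quadrature,
                                          update_values | update_JxW_values);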
        • /usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html 2024-12-27 18:25:07.276863247 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html 2024-12-27 18:25:07.276863247 +0000 @@ -387,7 +387,7 @@

        Number of shape functions. If this is a Q1 mapping, then it is simply the number of vertices per cell. However, since also derived classes use this class (e.g. the Mapping_Q() class), the number of shape functions may also be different.

        In general, it is $(p+1)^\text{dim}$, where $p$ is the polynomial degree of the mapping.
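
        A quick worked instance (illustrative arithmetic, not from the original page): for a degree $p = 2$ mapping in dim $= 3$,

        \[ (p+1)^{\text{dim}} = (2+1)^{3} = 27, \]

        i.e. one shape function per support point of a triquadratic cell.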

        Definition at line 372 of file mapping_q.h.

        /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html 2024-12-27 18:25:07.416864208 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html 2024-12-27 18:25:07.424864263 +0000 @@ -401,7 +401,7 @@
        Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

        Definition at line 2000 of file quadrature_generator.cc.

        @@ -431,7 +431,7 @@
        Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

        Definition at line 2008 of file quadrature_generator.cc.

        @@ -461,8 +461,8 @@
        Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

        Note
        The normal at the quadrature points will be parallel to $\nabla \psi$.

        Definition at line 2017 of file quadrature_generator.cc.
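
        A hedged usage sketch for this class follows; the constructor signature is as I understand it from the class documentation, and level_set_dof_handler, level_set, cell, and face_index are assumed names:

          #include <deal.II/base/quadrature_lib.h>
          #include <deal.II/hp/q_collection.h>
          #include <deal.II/non_matching/quadrature_generator.h>

          using namespace dealii;

          // Build immersed face quadratures from a discrete level set field.
          const hp::QCollection<1> q_collection_1d(QGauss<1>(3));
          NonMatching::DiscreteFaceQuadratureGenerator<dim> face_generator(
            q_collection_1d, level_set_dof_handler, level_set);

          face_generator.generate(cell, face_index);
          const Quadrature<dim - 1> &inside  = face_generator.get_inside_quadrature();
          const Quadrature<dim - 1> &outside = face_generator.get_outside_quadrature();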

        /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html 2024-12-27 18:25:07.448864428 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html 2024-12-27 18:25:07.452864455 +0000 @@ -388,7 +388,7 @@
        Return the quadrature rule for the region $\{x \in B : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $B$ is the BoundingBox passed to generate().

        Definition at line 1871 of file quadrature_generator.cc.

        @@ -418,7 +418,7 @@
        Return the quadrature rule for the region $\{x \in B : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $B$ is the BoundingBox passed to generate().

        Definition at line 1880 of file quadrature_generator.cc.

        @@ -448,8 +448,8 @@
        Return the quadrature rule for the region $\{x \in B : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $B$ is the BoundingBox passed to generate().

        Note
        The normal at the quadrature points will be parallel to $\nabla \psi$.

        Definition at line 1889 of file quadrature_generator.cc.
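
        Similarly for the cell version documented here, a hedged sketch (same assumed names as above):

          #include <deal.II/base/quadrature_lib.h>
          #include <deal.II/hp/q_collection.h>
          #include <deal.II/non_matching/quadrature_generator.h>

          using namespace dealii;

          // Generate inside/outside/surface quadratures for one cell.
          const hp::QCollection<1> q_collection_1d(QGauss<1>(3));
          NonMatching::DiscreteQuadratureGenerator<dim> generator(
            q_collection_1d, level_set_dof_handler, level_set);

          generator.generate(cell);
          const Quadrature<dim> &inside_quadrature  = generator.get_inside_quadrature();
          const Quadrature<dim> &outside_quadrature = generator.get_outside_quadrature();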

        /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html 2024-12-27 18:25:07.544865087 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html 2024-12-27 18:25:07.548865115 +0000 @@ -520,7 +520,7 @@ const unsigned int quadrature_point&#href_anchor"memdoc">

        Returns the surface gradient of the shape function with index function_no at the quadrature point with index quadrature_point.

        The surface gradient is defined as the projection of the gradient to the tangent plane of the surface: $ \nabla u - (n \cdot \nabla u) n $, where $n$ is the unit normal to the surface.

        Note
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients | update_normal_vectors flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

        Definition at line 137 of file fe_immersed_values.cc.
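
        The projection formula above is easy to spell out directly; surface_gradient below is a hypothetical helper for illustration, not this class's member:

          #include <deal.II/base/tensor.h>

          using namespace dealii;

          // grad_s u = grad u - (n . grad u) n, with n the unit surface normal.
          template <int dim>
          Tensor<1, dim> surface_gradient(const Tensor<1, dim> &grad_u,
                                          const Tensor<1, dim> &n)
          {
            return grad_u - (n * grad_u) * n;
          }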

        @@ -695,7 +695,7 @@

        If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

        Parameters
        i: Number of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
        q_point: Number of the quadrature point at which the function is to be evaluated.
        @@ -734,7 +734,7 @@

        Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

        Parameters
        i: Number of the shape function $\varphi_i$ to be evaluated.
        q_point: Number of the quadrature point at which the function is to be evaluated.
        component: Vector component to be evaluated.
        @@ -771,7 +771,7 @@

        The same holds for the arguments of this function as for the shape_value() function.

        Parameters
        i: Number of the shape function $\varphi_i$ to be evaluated.
        q_point: Number of the quadrature point at which the function is to be evaluated.
        @@ -965,11 +965,11 @@
        Parameters
        [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
        [out] values: The values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.

        Postcondition
        values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
        Note
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

        Definition at line 487 of file fe_values_base.cc.
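
        A typical call sequence, sketched with assumed names (cell, solution):

          // Evaluate the finite element field at this cell's quadrature points.
          std::vector<double> values(fe_values.n_quadrature_points);
          fe_values.reinit(cell);
          fe_values.get_function_values(solution, values);
          // values[q] now holds the field value at point q, cf. the
          // postcondition above.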

        @@ -999,7 +999,7 @@

        This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

        Postcondition
        values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
        Note
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

        Definition at line 505 of file fe_values_base.cc.

        @@ -1160,11 +1160,11 @@
        Parameters
        [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
        [out] gradients: The gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

        Postcondition
        gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
        Note
        The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
        @@ -1196,7 +1196,7 @@

        This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

        Postcondition
        gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
        Note
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

        Definition at line 683 of file fe_values_base.cc.
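
        And the analogous sketch for gradients (same assumed names as before):

          std::vector<Tensor<1, dim>> gradients(fe_values.n_quadrature_points);
          fe_values.get_function_gradients(solution, gradients);
          // gradients[q][d] is the derivative in coordinate direction d
          // at quadrature point q.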

        @@ -1303,11 +1303,11 @@
        Parameters
        [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
        [out] hessians: The Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

        Postcondition
        hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
        Note
        The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
        @@ -1344,7 +1344,7 @@

        This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

        Postcondition
        hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
        Note
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

        Definition at line 786 of file fe_values_base.cc.
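
        A matching sketch for second derivatives (same assumed names):

          std::vector<Tensor<2, dim>> hessians(fe_values.n_quadrature_points);
          fe_values.get_function_hessians(solution, hessians);
          // hessians[q][i][j] is the (i,j)-th second derivative at point q.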

        @@ -1451,11 +1451,11 @@
        Parameters
        [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
        [out] laplacians: The Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.

        Postcondition
        laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
        For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
        Note
        The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
        @@ -1489,7 +1489,7 @@

        This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

        Postcondition
        laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
        For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
        Note
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
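
        A short sketch for Laplacians as well (same assumed names):

          std::vector<double> laplacians(fe_values.n_quadrature_points);
          fe_values.get_function_laplacians(solution, laplacians);
          // laplacians[q] equals trace(hessians[q]), as noted above.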
        @@ -1636,11 +1636,11 @@
        Parameters
        [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
        [out] third_derivatives: The third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

        Postcondition
        third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
        Note
        The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
        @@ -1677,7 +1677,7 @@

        This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

        Postcondition
        third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
        Note
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

        Definition at line 1006 of file fe_values_base.cc.
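
        And the corresponding sketch for third derivatives (same assumed names):

          std::vector<Tensor<3, dim>> third_derivatives(fe_values.n_quadrature_points);
          fe_values.get_function_third_derivatives(solution, third_derivatives);
          // third_derivatives[q][i][j][k] is the (i,j,k)-th component at point q.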

        @@ -1992,7 +1992,7 @@

        Mapped quadrature weight. If this object refers to a volume evaluation (i.e. the derived class is of type FEValues), then this is the Jacobi determinant times the weight of the q_pointth unit quadrature point.

        For surface evaluations (i.e. classes FEFaceValues or FESubfaceValues), it is the mapped surface element times the weight of the quadrature point.

        You can think of the quantity returned by this function as the volume or surface element $dx, ds$ in the integral that we implement here by quadrature.

        Note
        For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
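
        For instance, this is how JxW() typically enters a cell integral (sketch; values as in the earlier snippets):

          // Approximate the integral of the field over the cell by quadrature:
          // sum of value times mapped quadrature weight.
          double cell_integral = 0;
          for (unsigned int q = 0; q < fe_values.n_quadrature_points; ++q)
            cell_integral += values[q] * fe_values.JxW(q);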
        @@ -2045,7 +2045,7 @@
      Return the Jacobian of the transformation at the specified quadrature point, i.e. $J_{ij}=dx_i/d\hat x_j$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
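
      A short access sketch (q is an assumed quadrature-point index; the return type is the library's DerivativeForm):

        // J_{ij} = dx_i / dx_hat_j at quadrature point q; its determinant is
        // the volume scaling that also enters JxW().
        const DerivativeForm<1, dim, spacedim> &J     = fe_values.jacobian(q);
        const double                            det_J = J.determinant();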
      @@ -2099,7 +2099,7 @@
      Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2153,7 +2153,7 @@
      Return the second derivative of the transformation from unit to real cell, i.e. the first derivative of the Jacobian, at the specified quadrature point, pushed forward to the real cell coordinates, i.e. $G_{ijk}=dJ_{iJ}/d\hat x_K (J_{jJ})^{-1} (J_{kK})^{-1}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_pushed_forward_grads flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2207,7 +2207,7 @@
      Return the third derivative of the transformation from unit to real cell, i.e. the second derivative of the Jacobian, at the specified quadrature point, i.e. $G_{ijkl}=\frac{d^2J_{ij}}{d\hat x_k d\hat x_l}$.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_jacobian_2nd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -2261,8 +2261,8 @@ /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html 2024-12-27 18:25:07.596865444 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html 2024-12-27 18:25:07.596865444 +0000 @@ -173,11 +173,11 @@

      Detailed Description

      template<int dim>
      class NonMatching::FEInterfaceValues< dim >

      This class is intended to facilitate assembling interface terms on faces in immersed (in the sense of cut) finite element methods. These types of terms occur mainly in cut discontinuous Galerkin methods. This class works analogously to NonMatching::FEValues. The domain is assumed to be described by a level set function, $\psi : \mathbb{R}^{dim} \to \mathbb{R}$, and this class assumes that we want to integrate over two different regions of each face, $F$:

      \[
 N = \{x \in F : \psi(x) < 0 \}, \\
 P = \{x \in F : \psi(x) > 0 \},
      \]

      which we as before refer to as the "inside" and "outside" regions of the face.

      @@ -210,7 +210,7 @@
      To reduce the amount of work, the reinit() function of this class uses the MeshClassifier passed to the constructor to check how the incoming cell relates to the level set function. The immersed quadrature rules are only generated if the cell is intersected. If the cell is completely inside or outside, it returns a cached FEInterfaceValues object created with a quadrature over the reference cell: $[0, 1]^{dim-1}$.

      Definition at line 488 of file fe_values.h.

      Member Typedef Documentation

      @@ -364,7 +364,7 @@ - + @@ -502,7 +502,7 @@
      mapping_collection: Collection of Mappings to be used.
      fe_collection: Collection of FiniteElements to be used.
      q_collection: Collection of Quadrature rules over $[0, 1]^{dim-1}$ that should be used when a face is not intersected and we do not need to generate immersed quadrature rules.
      q_collection_1d: Collection of 1-dimensional quadrature rules used to generate the immersed quadrature rules. See the QuadratureGenerator class.
      mesh_classifier: Object used to determine when the immersed quadrature rules need to be generated.
      region_update_flags: Struct storing UpdateFlags for the inside/outside region of the cell.
      Return an FEInterfaceValues object reinitialized with a quadrature for the inside region of the cell: $\{x \in K : \psi(x) < 0 \}$.

      Note
      If the quadrature rule over the region is empty, e.g. because the cell is completely located in the outside domain, the returned optional will not contain a value.

      Definition at line 537 of file fe_values.cc.
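
      The optional-checking pattern for these getters, sketched with an assumed object name non_matching_fiv:

        // The getter hands back an empty optional when no quadrature points
        // were generated for this region, so check before dereferencing.
        const auto &inside_fiv = non_matching_fiv.get_inside_fe_values();
        if (inside_fiv)
          {
            // use *inside_fiv like a regular FEInterfaceValues object
          }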

      @@ -525,7 +525,7 @@
      Return an FEInterfaceValues object reinitialized with a quadrature for the outside region of the cell: $\{x \in K : \psi(x) > 0 \}$.

      Note
      If the quadrature rule over the region is empty, e.g. because the cell is completely located in the inside domain, the returned optional will not contain a value.

      Definition at line 549 of file fe_values.cc.

      @@ -556,7 +556,7 @@
      Do work common to the constructors. The incoming QCollection should be quadratures integrating over $[0, 1]^{dim-1}$. These will be used on the non-intersected cells.

      Definition at line 397 of file fe_values.cc.

      @@ -847,7 +847,7 @@
      FEInterfaceValues object created with a quadrature rule integrating over the inside region, $\{x \in B : \psi(x) < 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

      Definition at line 702 of file fe_values.h.

      @@ -874,7 +874,7 @@
      FEInterfaceValues object created with a quadrature rule integrating over the outside region, $\{x \in B : \psi(x) > 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

      Definition at line 711 of file fe_values.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html 2024-12-27 18:25:07.632865691 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html 2024-12-27 18:25:07.640865746 +0000 @@ -177,17 +177,17 @@

      Detailed Description

      template<int dim>
      class NonMatching::FEValues< dim >

      This class is intended to facilitate assembling in immersed (in the sense of cut) finite element methods when the domain is described by a level set function, $\psi : \mathbb{R}^{dim} \to \mathbb{R}$. In this type of method, we typically need to integrate over 3 different regions of each cell, $K$:

      \[
 N = \{x \in K : \psi(x) < 0 \}, \\
 P = \{x \in K : \psi(x) > 0 \}, \\
 S = \{x \in K : \psi(x) = 0 \}.
      \]

      Thus we need quadrature rules for these 3 regions:

      As in the QuadratureGenerator class, we refer to $N$, $P$, and $S$ as the inside, outside, and surface regions. The constructor of this class takes a discrete level set function described by a DoFHandler and a Vector. When the reinit() function is called, the QuadratureGenerator will be called in the background to create these immersed quadrature rules. This class then creates FEValues objects for the inside/outside regions and an FEImmersedSurfaceValues object for the surface region. These objects can then be accessed through one of the functions: get_inside_fe_values(), get_outside_fe_values(), or get_surface_fe_values(). Since a cut between a cell and the domain can be arbitrarily small, the underlying algorithm may generate a quadrature rule with 0 points. This can, for example, happen if the relative size of the cut is similar to the floating-point accuracy. Since the FEValues-like objects are not allowed to contain 0 points, the object that get_inside/outside/surface_fe_values() returns is wrapped in a std::optional. This requires us to check if the returned FEValues-like object contains a value before we use it:

      for (const auto &cell : dof_handler.active_cell_iterators())
        {
          non_matching_fe_values.reinit(cell);

          const std::optional<FEValues<dim>> &fe_values_inside =
            non_matching_fe_values.get_inside_fe_values();

          if (fe_values_inside)
            {
              // use *fe_values_inside as an ordinary FEValues object
            }
        }
      Of course, it is somewhat expensive to generate the immersed quadrature rules and create FEValues objects with the generated quadratures. To reduce the amount of work, the reinit() function of this class uses the MeshClassifier passed to the constructor to check how the incoming cell relates to the level set function. It only generates the immersed quadrature rules if the cell is intersected. If the cell is completely inside or outside, it returns a cached FEValues object created with a quadrature over the reference cell: $[0, 1]^{dim}$.

      Definition at line 143 of file fe_values.h.
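
      For orientation, a construction sketch in the spirit of the deal.II step-85 tutorial; level_set_dof_handler, level_set, and fe_collection are assumed names:

        #include <deal.II/base/quadrature_lib.h>
        #include <deal.II/non_matching/fe_values.h>
        #include <deal.II/non_matching/mesh_classifier.h>

        using namespace dealii;

        // Classify all cells relative to the discrete level set, then build
        // the NonMatching::FEValues that hands out inside/outside/surface
        // views per cell.
        NonMatching::MeshClassifier<dim> mesh_classifier(level_set_dof_handler,
                                                         level_set);
        mesh_classifier.reclassify();

        NonMatching::RegionUpdateFlags region_update_flags;
        region_update_flags.inside  = update_values | update_JxW_values;
        region_update_flags.outside = update_values | update_JxW_values;

        const QGauss<1> quadrature_1d(3);
        NonMatching::FEValues<dim> non_matching_fe_values(fe_collection,
                                                          quadrature_1d,
                                                          region_update_flags,
                                                          mesh_classifier,
                                                          level_set_dof_handler,
                                                          level_set);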

      Member Typedef Documentation

      @@ -359,7 +359,7 @@ - + @@ -464,7 +464,7 @@
      mapping_collection: Collection of Mappings to be used.
      fe_collection: Collection of FiniteElements to be used.
      q_collection: Collection of Quadrature rules over $[0, 1]^{dim}$ that should be used when a cell is not intersected and we do not need to generate immersed quadrature rules.
      q_collection_1d: Collection of 1-dimensional quadrature rules used to generate the immersed quadrature rules. See the QuadratureGenerator class.
      mesh_classifier: Object used to determine when the immersed quadrature rules need to be generated.
      region_update_flags: Struct storing UpdateFlags for the inside/outside/surface region of the cell.
      Return an FEValues object reinitialized with a quadrature for the inside region of the cell: $\{x \in K : \psi(x) < 0 \}$.

      Note
      If the quadrature rule over the region is empty, e.g. because the cell is completely located in the outside domain, the returned optional will not contain a value.

      Definition at line 306 of file fe_values.cc.

      @@ -487,7 +487,7 @@
      Return an FEValues object reinitialized with a quadrature for the outside region of the cell: $\{x \in K : \psi(x) > 0 \}$.

      Note
      If the quadrature rule over the region is empty, e.g. because the cell is completely located in the inside domain, the returned optional will not contain a value.

      Definition at line 318 of file fe_values.cc.

      @@ -510,7 +510,7 @@
      Return an FEValues object reinitialized with a quadrature for the surface region of the cell: $\{x \in K : \psi(x) = 0 \}$.

      Note
      If the quadrature rule over the region is empty, e.g. because the cell is not intersected, the returned optional will not contain a value.

      Definition at line 330 of file fe_values.cc.

      @@ -583,7 +583,7 @@
      Do work common to the constructors. The incoming QCollection should be quadratures integrating over $[0, 1]^{dim}$. These will be used on the non-intersected cells.

      Definition at line 101 of file fe_values.cc.

      @@ -800,7 +800,7 @@
      For each element in the FECollection passed to the constructor, this object contains an FEValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim}$ and UpdateFlags for the inside region. Thus, these optionals should always contain a value.

      When LocationToLevelSet of the cell is INSIDE (and we do not need to generate an immersed quadrature), we return the FEValues object in this container corresponding to the cell's active_fe_index.

      This container is a std::deque, which is compatible with the FEValues class that does not have a copy-constructor.

      @@ -829,7 +829,7 @@
      For each element in the FECollection passed to the constructor, this object contains an FEValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim}$ and UpdateFlags for the outside region. Thus, these optionals should always contain a value.

      When LocationToLevelSet of the cell is OUTSIDE (and we do not need to generate an immersed quadrature), we return the FEValues object in this container corresponding to the cell's active_fe_index.

      This container is a std::deque, which is compatible with the FEValues class that does not have a copy-constructor.

      @@ -858,7 +858,7 @@
      FEValues object created with a quadrature rule integrating over the inside region, $\{x \in B : \psi(x) < 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

      Definition at line 397 of file fe_values.h.

      @@ -885,7 +885,7 @@
      FEValues object created with a quadrature rule integrating over the outside region, $\{x \in B : \psi(x) > 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

      Definition at line 406 of file fe_values.h.

      @@ -912,7 +912,7 @@
      FEImmersedSurfaceValues object created with a quadrature rule integrating over the surface region, $\{x \in B : \psi(x) = 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

      Definition at line 415 of file fe_values.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html 2024-12-27 18:25:07.664865911 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html 2024-12-27 18:25:07.672865966 +0000 @@ -149,16 +149,16 @@

      Detailed Description

      template<int dim>
class NonMatching::FaceQuadratureGenerator< dim >

This class creates immersed quadrature rules over a face, $F$, of a BoundingBox, when the domain is described by a level set function, $\psi$.

In the same way as in the QuadratureGenerator class, this class generates quadrature rules to integrate over 3 different regions of the face, $F \subset \mathbb{R}^{dim}$:

\[
 N = \{x \in F : \psi(x) < 0 \}, \\
 P = \{x \in F : \psi(x) > 0 \}, \\
 S = \{x \in F : \psi(x) = 0 \},
\]

which are again referred to as the "inside", $N$, "outside", $P$, and "surface" region, $S$. These types of quadrature rules are in general needed in immersed discontinuous Galerkin methods.

Under the hood, this class uses the QuadratureGenerator class to build these rules. This is done by restricting the dim-dimensional level set function to the face, thus creating a (dim-1)-dimensional level set function, $\phi$. It then creates the (dim-1)-dimensional quadratures by calling QuadratureGenerator with $\phi$. This means that what holds for the QuadratureGenerator class in general also holds for this class. In particular, if the 1d-quadrature that is used as base contains $n$ points, the number of points will be proportional to $n^{dim-1}$ in the inside/outside quadratures and to $n^{dim-2}$ in the surface quadrature.

      Definition at line 305 of file quadrature_generator.h.
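A minimal usage sketch, assuming deal.II 9.4+ and the generate(level_set, box, face_index) call implied by the member documentation below; Functions::SignedDistance::Sphere serves as an example level set:

#include <deal.II/base/bounding_box.h>
#include <deal.II/base/function_signed_distance.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/hp/q_collection.h>
#include <deal.II/non_matching/quadrature_generator.h>

using namespace dealii;

void face_quadratures()
{
  constexpr int dim = 2;

  // psi < 0 inside a disc of radius 0.5 centered at (-1, 0), so the
  // face x = -1 of the box below is cut at y = +-0.5.
  const Functions::SignedDistance::Sphere<dim> level_set(Point<dim>(-1, 0), 0.5);
  const BoundingBox<dim> box({Point<dim>(-1, -1), Point<dim>(1, 1)});

  NonMatching::FaceQuadratureGenerator<dim> face_generator(
    hp::QCollection<1>(QGauss<1>(2)));

  const unsigned int face_index = 0; // the face x = -1
  face_generator.generate(level_set, box, face_index);

  // (dim-1)-dimensional rules over N, P and S of the face:
  const Quadrature<dim - 1> &inside  = face_generator.get_inside_quadrature();
  const Quadrature<dim - 1> &outside = face_generator.get_outside_quadrature();
  const NonMatching::ImmersedSurfaceQuadrature<dim - 1, dim> &surface =
    face_generator.get_surface_quadrature();
  (void)inside; (void)outside; (void)surface;
}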

      Member Typedef Documentation

      @@ -305,7 +305,7 @@
Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      Definition at line 2000 of file quadrature_generator.cc.

      @@ -327,7 +327,7 @@
Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      Definition at line 2008 of file quadrature_generator.cc.

      @@ -349,8 +349,8 @@
Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

Note
The normal at the quadrature points will be parallel to $\nabla \psi$.

      Definition at line 2017 of file quadrature_generator.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html 2024-12-27 18:25:07.696866131 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html 2024-12-27 18:25:07.700866158 +0000 @@ -282,7 +282,7 @@

Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      Definition at line 2098 of file quadrature_generator.cc.

      @@ -304,7 +304,7 @@

Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      Definition at line 2105 of file quadrature_generator.cc.

      @@ -324,7 +324,7 @@
Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$, where $F$ is the face of the BoundingBox passed to generate().

      Note
      In 1d, this quadrature always contains 0 points.

      Definition at line 2113 of file quadrature_generator.cc.

      @@ -371,7 +371,7 @@
Quadrature for the region $\{x \in F : \psi(x) < 0 \}$. Created in the last call to generate().

      Definition at line 498 of file quadrature_generator.h.

      @@ -396,7 +396,7 @@
Quadrature for the region $\{x \in F : \psi(x) > 0 \}$. Created in the last call to generate().

      Definition at line 505 of file quadrature_generator.h.

      @@ -421,7 +421,7 @@
Quadrature for the region $\{x \in F : \psi(x) = 0 \}$. This quadrature always contains zero points in 1d.

      Definition at line 512 of file quadrature_generator.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html 2024-12-27 18:25:07.720866296 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html 2024-12-27 18:25:07.728866351 +0000 @@ -139,41 +139,41 @@

      Detailed Description

      template<int dim, int spacedim = dim>
class NonMatching::ImmersedSurfaceQuadrature< dim, spacedim >

This class defines a quadrature formula to integrate over the intersection between an oriented surface, $\hat{S}$, and a cell or face. The word "immersed" in the class name reflects that the surface may intersect the cell/face in an arbitrary way.

The spacedim template parameter of this class is the dimension that the (spacedim-1)-dimensional surface is embedded in: $\hat{S} \subset \mathbb{R}^{\text{spacedim}}$. The dim parameter describes the dimension of the "object" that the surface intersects. That is, dim = spacedim corresponds to the surface intersecting a cell and dim = spacedim - 1 corresponds to the surface intersecting a face. The quadrature formula is described by a set of quadrature points, $\hat{x}_q \in \mathbb{R}^{\text{dim}}$, weights, $w_q$, and normalized surface normals, $\hat{n}_q \in \mathbb{R}^{\text{spacedim}}$.

Consider first the case dim = spacedim. We typically want to compute integrals in real space. A surface, $S$, intersecting a cell, $K$, in real space can be mapped onto a surface, $\hat{S}$, intersecting the unit cell, $\hat{K}$. Thus an integral over $S\cap K$ in real space can be transformed to an integral over $\hat{S} \cap \hat{K}$ according to

\[
 \int_{S\cap K} f dS =
 \int_{S\cap K} f |d\bar{S}| =
 \int_{\hat{S}\cap\hat{K}} f \circ F_{K} \det(J) |\left( J^{-1} \right)^T d\hat{S}|,
\]

where $F_K$ is the mapping from reference to real space and $J$ is its Jacobian matrix. This transformation is possible since the continuous surface elements are vectors: $d\bar{S}, d\hat{S} \in \mathbb{R}^{spacedim}$, which are parallel to the normals of $S$ and $\hat{S}$. That is, the normal is needed to do the transformation. Thus, in addition to storing points and weights, this quadrature also stores the normalized normal for each quadrature point. This can be viewed as storing a discrete surface element,

\[
 \Delta \hat{S}_q \dealcoloneq w_q \hat{n}_q \approx d\hat{S}(\hat{x}_q),
\]

for each quadrature point. The surface integral in real space would then be approximated as

\[
 \int_{S\cap K} f dS \approx
 \sum_{q} f \left(F_{K}(\hat{x}_{q}) \right) \det(J_q)
 |\left( J_q^{-1} \right)^T \hat{n}_q| w_q.
\]

When dim = spacedim - 1, this class represents a (spacedim-2)-dimensional integral. That is, if spacedim = 3 we have a line integral immersed in a face. Let $\hat{r}(t)$, $t \in [0,T]$ be an arc-length parameterization of $\hat{F}\cap \hat{S}$, i.e., the part of the surface that intersects the face in reference space. This means that $\bar{r}(t) = F_K(\hat{r}(t))$ is a parameterization of $S\cap F$. The transformation of the line integral now reads

\[
 \int_{S\cap F} f dr
 = \int_{0}^T f(\bar{r}(t)) \left \|\frac{d\bar{r}}{dt} \right \| dt
 = \int_{0}^T f(F_K(\hat{r}(t))) \left \| J \frac{d\hat{r}}{dt} \right \| dt
 \approx \sum_{q} f \left(F_{K}(\hat{x}_{q}) \right) \|J(\hat{x}_q)
 \hat{t}_q \| w_q,
\]

where $\hat{t}_q = \frac{d\hat{r}}{dt}(x_q)$ is the tangent to the curve at $\hat{x}_q$. This tangent can also be computed as $\hat{t}_q = \hat{n}_q \times \hat{n}_F / \| \hat{n}_q \times \hat{n}_F \|$, where $\hat{n}_F$ is the face normal. It would be possible to compute the tangent by only knowing the normal to the curve in the face plane (i.e. the dim-dimensional normal). However, when these quadratures are used, the weak form typically involves the so-called conormal, which cannot be computed without knowing the surface normal in $\mathbb{R}^{\text{spacedim}}$. The conormal is the unit vector parallel to the projection of the face normal into the surface plane. This is the same as the normalized boundary form.

      Definition at line 106 of file immersed_surface_quadrature.h.
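The factor $\det(J) |\left( J^{-1} \right)^T \hat{n}_q| w_q$ in the sum above is easy to compute pointwise. A small self-contained illustration for dim = spacedim = 2 (plain C++, not deal.II API):

#include <array>
#include <cmath>

// Real-space surface weight for one quadrature point, given the Jacobian
// J of F_K at the point, the reference-space unit normal n_q, and the
// reference-space weight w_q.
double real_space_surface_weight(const std::array<std::array<double, 2>, 2> &J,
                                 const std::array<double, 2>                &n_q,
                                 const double                                w_q)
{
  const double det = J[0][0] * J[1][1] - J[0][1] * J[1][0];

  // t = J^{-T} n_q, using J^{-T} = 1/det [[J11, -J10], [-J01, J00]]:
  const double t0 = (J[1][1] * n_q[0] - J[1][0] * n_q[1]) / det;
  const double t1 = (-J[0][1] * n_q[0] + J[0][0] * n_q[1]) / det;

  return det * std::hypot(t0, t1) * w_q;
}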

      Constructor & Destructor Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html 2024-12-27 18:25:07.752866515 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html 2024-12-27 18:25:07.756866543 +0000 @@ -147,24 +147,24 @@

      Detailed Description

      template<int dim>
class NonMatching::QuadratureGenerator< dim >

This class creates immersed quadrature rules over a BoundingBox, $B \subset \mathbb{R}^{dim}$, when the domain is described by a level set function, $\psi$.

This class creates quadrature rules for the intersections between the box and the three different regions defined by the level set function. That is, it creates quadrature rules to integrate over the following regions

\[
 N = \{x \in B : \psi(x) < 0 \}, \\
 P = \{x \in B : \psi(x) > 0 \}, \\
 S = \{x \in B : \psi(x) = 0 \}.
\]

When working with level set functions, the most common convention is to describe a domain, $\Omega$, as

\[
 \Omega = \{ x \in \mathbb{R}^{dim} : \psi(x) < 0 \}.
\]

Given this, we shall use the name convention that $N$ is the "inside" region (i.e. inside $\Omega$), $P$ is the "outside" region and $S$ is the "surface" region. The "inside" and "outside" quadratures will also be referred to as the "bulk"-quadratures.

The underlying algorithm uses a 1-dimensional quadrature rule as the base for creating the immersed quadrature rules. Gauss-Legendre quadrature (QGauss) is recommended. The constructor takes an hp::QCollection<1>. One can select which 1d-quadrature in the collection should be used through the set_1d_quadrature() function. The number of quadrature points in the constructed quadratures will vary depending on the level set function. More quadrature points will be created if the intersection is "bad", for example, if the zero-contour has a high curvature compared to the size of the box. However, if the number of points in the 1d quadrature is $n$, the number of points will be proportional to $n^{dim}$ in the bulk quadratures and to $n^{dim-1}$ in the surface quadrature. For example, in the 2d-example in the above figure, there are 2 points in the 1d-quadrature. If the 1d-quadrature is a Gauss-Legendre quadrature and the grid has size $h$, the immersed quadratures typically give global errors proportional to $h^{2n}$, both for the bulk and surface integrals. If the 1d-quadrature has positive weights, the weights of the immersed quadratures will also be positive.
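A minimal usage sketch, assuming deal.II 9.4+; Functions::SignedDistance::Sphere is used as an example level set, and the getters named in the member documentation below return the three rules:

#include <deal.II/base/bounding_box.h>
#include <deal.II/base/function_signed_distance.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/hp/q_collection.h>
#include <deal.II/non_matching/quadrature_generator.h>

using namespace dealii;

void bulk_and_surface_quadratures()
{
  constexpr int dim = 2;

  // psi < 0 inside a disc of radius 0.4, immersed in the box [-1, 1]^2.
  const Functions::SignedDistance::Sphere<dim> level_set(Point<dim>(), 0.4);
  const BoundingBox<dim> box({Point<dim>(-1, -1), Point<dim>(1, 1)});

  // 1d Gauss-Legendre base rule, as recommended above.
  NonMatching::QuadratureGenerator<dim> generator(
    hp::QCollection<1>(QGauss<1>(2)));
  generator.generate(level_set, box);

  const Quadrature<dim> &inside  = generator.get_inside_quadrature();  // N
  const Quadrature<dim> &outside = generator.get_outside_quadrature(); // P
  const NonMatching::ImmersedSurfaceQuadrature<dim> &surface =
    generator.get_surface_quadrature();                                // S
  (void)inside; (void)outside; (void)surface;
}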

A detailed description of the underlying algorithm can be found in "High-Order Quadrature Methods for Implicitly Defined Surfaces and Volumes in Hyperrectangles", R. I. Saye, SIAM J. Sci. Comput., 37(2), http://www.dx.doi.org/10.1137/140966290

@@ -305,7 +305,7 @@

Return the quadrature rule for the region $\{x \in B : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $B$ is the BoundingBox passed to generate().

      Definition at line 1871 of file quadrature_generator.cc.

      @@ -327,7 +327,7 @@
Return the quadrature rule for the region $\{x \in B : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $B$ is the BoundingBox passed to generate().

      Definition at line 1880 of file quadrature_generator.cc.

      @@ -349,8 +349,8 @@
Return the quadrature rule for the region $\{x \in B : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $B$ is the BoundingBox passed to generate().

Note
The normal at the quadrature points will be parallel to $\nabla \psi$.

      Definition at line 1889 of file quadrature_generator.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html 2024-12-27 18:25:07.804866872 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html 2024-12-27 18:25:07.808866900 +0000 @@ -258,7 +258,7 @@

where $f_j$ are the local solution values and $\hat{\phi}_j(\hat{x})$ are the local reference space shape functions. The gradient and Hessian of this function are thus derivatives with respect to the reference space coordinates, $\hat{x}_0, \hat{x}_1, \ldots$.

      Note that this class is similar to FEFieldFunction, but that FEFieldFunction implements the following function on a given cell, $K$,

      $f(x) = \sum_{j=0}^{n-1} f_j \hat{\phi}_j(F_K^{-1}(x))$,

which has the same coefficients but uses real space basis functions. Here, $F_K$ is the mapping from the reference cell to the real cell.

      Before calling the value/gradient/hessian function, the set_active_cell function must be called to specify which cell the function should be evaluated on.

      Definition at line 1335 of file quadrature_generator.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 2024-12-27 18:25:07.844867147 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 2024-12-27 18:25:07.852867202 +0000 @@ -163,20 +163,20 @@

      Detailed Description

      template<int dim, int spacedim>
      class NonMatching::internal::QuadratureGeneratorImplementation::QGenerator< dim, spacedim >

      This class implements the Saye-algorithm cited in the documentation of the QuadratureGenerator class.

The generate function takes a number of $dim$-dimensional level set functions, $\psi_i$, and a BoundingBox<dim>, and builds a partitioning of quadratures, as defined in the documentation of the QPartitioning class. That is, this class builds an object of type QPartitioning<dim>.

If all $\psi_i$ passed to generate can be determined to be positive or negative definite, the QPartitioning will consist of a single quadrature forming a tensor product.

If this is not the case, the algorithm uses recursion over the spatial dimension. The spacedim template parameter denotes the dimension we started with and dim denotes on what level we are in the recursion. That is, we first construct a QPartitioning<dim - 1> and then build the higher dimensional quadratures from these. What we actually want in the end is a spacedim-dimensional partitioning of quadratures, for a single level set function, $\psi$.

The algorithm is based on the implicit function theorem. Starting with a single level set function, $\psi$, we try to find a direction $i$, such that

$|\frac{\partial \psi}{\partial x_i}| > 0$.

      throughout the whole box. This means that the zero-contour of the level set function can be parameterized by an implicit function

$H = H(x_0, ..., x_{i-1}, x_{i+1}, ..., x_{dim-1})$,

      so that

$\psi(..., x_{i-1}, H(..., x_{i-1}, x_{i+1}, ...), x_{i+1}, ...) = 0$,

over a subset, $I \subset C \subset \mathbb{R}^{dim-1}$, of the cross section, $C$, of the box (see BoundingBox::cross_section). Here, $I$ is the "indefinite"-region defined in the QPartitioning class. To follow convention in the original paper, we will refer to $H$ as the "height-function" and to $i$ as the "height-function direction".

If a height function direction can be found, we go down in dimension by creating two new level set functions, $\{\psi_0, \psi_1\}$, which are the restriction of $\psi$ to the top and bottom faces of the box (in the height function direction). We then delegate to QGenerator<dim-1, spacedim> to create a QPartitioning<dim-1> over the cross section.

      When we reach the base case, $dim = 1$, the creation of QPartitioning<1> is simple. See the documentation in specialized class: QGenerator<1, spacedim>.

      As we go up through the dimensions and create the higher dimensional quadratures, we need to know the function value of the height functions at the lower dimensional quadrature points. Since the functions are implicit, we need to do root-finding on the level set functions to find the function values. For this we use the class UpThroughDimensionCreator, see documentation there.

When we have $n$ level set functions (i.e. after having gone down in dimension), we try to find a height function direction which works for all those $\psi_i$ that are intersected by the zero contour (i.e. those not positive or negative definite). If such a direction exists, we will have a maximum of $n$ associated implicit height functions, $H_j$. Each $H_j$ parametrizes the $x_i$-coordinate of the zero-contour over a region, $I_j$. The indefinite region in the lower dimensional partitioning is the union of these: $I = \cup_j I_j$.

      As we try to find a height function direction, we estimate bounds on the gradient components by approximating each component as a 1st-order Taylor-polynomial. If a direction can not be found, the box is split and we recurse on each smaller box. This makes an implicit function more likely to exist since we seek it over a smaller portion of the zero contour. It also makes the estimated bounds tighter since we extrapolate the Taylor-polynomial a shorter distance.

Since we cannot split a box forever, there is a maximum number of allowed splits in the AdditionalData struct passed to the constructor. If this is reached, the algorithm uses the midpoint method as a last resort.
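The first-order bound estimate mentioned above can be made concrete. A minimal illustration (plain C++ with hypothetical inputs, not the deal.II internals): given the value g of one gradient component at the box center, the corresponding Hessian row H_i, and the half-widths h of the box, the component is certainly nonzero over the box if its value at the center exceeds the maximal first-order variation:

#include <array>
#include <cmath>

template <int dim>
bool component_bounded_away_from_zero(const double                   g,
                                      const std::array<double, dim> &H_i,
                                      const std::array<double, dim> &h)
{
  // Maximal change of the component predicted by the 1st-order
  // Taylor-polynomial when moving from the center to a corner:
  double variation = 0;
  for (int d = 0; d < dim; ++d)
    variation += std::abs(H_i[d]) * h[d];

  return std::abs(g) > variation;
}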

      @@ -326,7 +326,7 @@
Gets the $(dim - 1)$-dimensional quadratures from the lower dimensional algorithm and creates the $dim$-dimensional quadrature rules over the box from the lower dimensional ones.

      Definition at line 1141 of file quadrature_generator.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html 2024-12-27 18:25:07.880867394 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html 2024-12-27 18:25:07.888867449 +0000 @@ -164,8 +164,8 @@

      Detailed Description

      template<int spacedim>
      class NonMatching::internal::QuadratureGeneratorImplementation::QGenerator< 1, spacedim >

      The 1d-base case of the recursive algorithm QGenerator<dim, spacedim>.

Let $L$ and $R$ be the left and right bounds of the one-dimensional BoundingBox. This interval is partitioned into $[x_0, x_1, ..., x_n]$ where $x_0 = L$, $x_n = R$, and the remaining $x_i$ are the roots of the level set functions in the interval $[L, R]$. In each interval, $[x_i, x_{i+1}]$, quadrature points are distributed according to a 1d-quadrature rule. These points are added to one of the regions of QPartitioning determined from the signs of the level set functions on the interval (see documentation of QPartitioning).

If spacedim = 1 the points $[x_1, x_{n-1}]$ are also added as surface quadrature points to QPartitioning::surface.

      Definition at line 1276 of file quadrature_generator.h.
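The base case is compact enough to sketch directly. A schematic illustration in plain C++ (not the deal.II implementation): distribute a base rule over each subinterval between consecutive roots and classify the subinterval by the sign of the level set function at its midpoint:

#include <cstddef>
#include <functional>
#include <vector>

struct QPoint1D { double x, w; };

void distribute_1d(const std::function<double(double)> &psi,
                   std::vector<double> roots, // roots of psi in (L, R)
                   const double L, const double R,
                   const std::vector<QPoint1D> &base, // 1d rule on [0, 1]
                   std::vector<QPoint1D> &negative,   // region N
                   std::vector<QPoint1D> &positive)   // region P
{
  roots.insert(roots.begin(), L);
  roots.push_back(R);

  for (std::size_t i = 0; i + 1 < roots.size(); ++i)
    {
      const double a = roots[i], b = roots[i + 1];

      // The sign is constant on (a, b); probe it at the midpoint.
      std::vector<QPoint1D> &region =
        (psi((a + b) / 2) < 0) ? negative : positive;

      for (const QPoint1D &q : base)
        region.push_back({a + (b - a) * q.x, (b - a) * q.w});
    }
}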

      Constructor & Destructor Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html 2024-12-27 18:25:07.912867614 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html 2024-12-27 18:25:07.916867641 +0000 @@ -134,19 +134,19 @@

      Detailed Description

      template<int dim>
class NonMatching::internal::QuadratureGeneratorImplementation::QPartitioning< dim >

Class that stores quadrature rules to integrate over 4 different regions of a single BoundingBox, $B$. Given multiple level set functions,

$\psi_i : \mathbb{R}^{dim} \rightarrow \mathbb{R}$, $i = 0, 1, ...$,

the box, $B \subset \mathbb{R}^{dim}$, is partitioned into a "negative", "positive", and "indefinite" region, $B = N \cup P \cup I$, according to the signs of $\psi_i$ over each region:

\[
 N = \{x \in B : \psi_i(x) < 0, \forall i \}, \\
 P = \{x \in B : \psi_i(x) > 0, \forall i \}, \\
 I = B \setminus (\overline{N} \cup \overline{P}).
\]

Thus, all $\psi_i$ are positive over $P$ and negative over $N$. Over $I$ the level set functions differ in sign. This class holds quadrature rules for each of these regions. In addition, when there is a single level set function, $\psi$, it holds a surface quadrature for the zero contour of $\psi$:

$S = \{x \in B : \psi(x) = 0 \}$.

Note that when there is a single level set function, $I$ is empty and $N$ and $P$ are the regions that one typically integrates over in an immersed finite element method.

      Definition at line 815 of file quadrature_generator.h.

      Member Function Documentation

      @@ -208,7 +208,7 @@
Quadrature for the region $\{x \in B : \psi_i(x) < 0 \forall i \}$ of the box, $B$.

      Definition at line 835 of file quadrature_generator.h.

      @@ -227,7 +227,7 @@
Quadrature for the region $\{x \in B : \psi_i(x) > 0 \forall i \}$ of the box, $B$.

      Definition at line 841 of file quadrature_generator.h.

      @@ -265,7 +265,7 @@
Quadrature for the region $\{x \in B : \psi(x) = 0 \}$ of the box, $B$.

      Definition at line 853 of file quadrature_generator.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html 2024-12-27 18:25:07.940867806 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html 2024-12-27 18:25:07.948867861 +0000 @@ -135,7 +135,7 @@  

      Detailed Description

A class that attempts to find multiple distinct roots of a function, $f(x)$, over an interval, $[l, r]$. This is done as follows. If there is a sign change in function value between the interval end points, we solve for the root. If there is no sign change, we attempt to bound the function value away from zero on $[l, r]$, to conclude that no roots exist. If we can't exclude that there are roots, we split the interval in two: $[l, (r + l) / 2]$, $[(r + l) / 2, r]$, and use the same algorithm recursively on each interval. This means that we can typically find 2 distinct roots, but not 3.

The bounds on the function values are estimated using the function taylor_estimate_function_bounds, which approximates the function as a second order Taylor-polynomial around the interval midpoint. When we have a sign change on an interval, this class uses boost::math::tools::toms748_solve for finding roots.

      Definition at line 664 of file quadrature_generator.h.
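The recursive strategy above is small enough to sketch. A schematic illustration in plain C++ (not the deal.II class): the real implementation certifies the absence of roots via the Taylor-based bounds described above and solves with boost::math::tools::toms748_solve; here a depth cutoff and plain bisection stand in for both:

#include <functional>
#include <vector>

void find_roots(const std::function<double(double)> &f,
                const double                         l,
                const double                         r,
                std::vector<double>                 &roots,
                const unsigned int                   depth = 0)
{
  const unsigned int max_depth = 5; // stand-in for the bound-based exclusion

  if (f(l) * f(r) < 0) // sign change: solve for one root by bisection
    {
      double a = l, b = r;
      while (b - a > 1e-12 * (r - l))
        {
          const double m = (a + b) / 2;
          if (f(a) * f(m) <= 0)
            b = m;
          else
            a = m;
        }
      roots.push_back((a + b) / 2);
    }
  else if (depth < max_depth) // can't exclude roots: split and recurse
    {
      const double m = (l + r) / 2;
      find_roots(f, l, m, roots, depth + 1);
      find_roots(f, m, r, roots, depth + 1);
    }
  // Duplicate roots found from adjacent subintervals are merged afterwards
  // with respect to a tolerance, as described above.
}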

@@ -181,7 +181,7 @@ std::vector< double > & roots

For each of the incoming functions, attempt to find the roots over the interval defined by interval and add these to roots. The returned roots will be sorted in ascending order: $x_0 < x_1 < ...$ and duplicate roots (with respect to the tolerance in AdditionalData) will be removed.

      Definition at line 533 of file quadrature_generator.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html 2024-12-27 18:25:07.972868026 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html 2024-12-27 18:25:07.976868053 +0000 @@ -144,13 +144,13 @@

      Detailed Description

      template<int dim, int spacedim>
class NonMatching::internal::QuadratureGeneratorImplementation::UpThroughDimensionCreator< dim, spacedim >

This class is responsible for creating quadrature points for the $dim$-dimensional quadrature partitioning from a $(dim - 1)$-dimensional "indefinite" quadrature (see QPartitioning documentation).

To be precise, let $[L, R]$ be the extents of the box in the height function direction and let $I \subset \mathbb{R}^{dim-1}$ be the lower dimensional indefinite region. This class will create quadrature points over $I \times [L, R] \subset \mathbb{R}^{dim}$ and, in the case $dim=spacedim$, points for the surface quadrature.

For each lower dimensional quadrature point, $(x_I, w_I)$ in the indefinite quadrature, we create several 1d-level set functions by restricting $\psi_j$ to $x_I$. We then partition the interval $[L, R]$ into $[y_0, y_1, ..., y_n]$, where $y_0 = L$, $y_n = R$, and the remaining $y_i$ are the roots of the 1d-level set functions in $[L, R]$. Since the level set functions change sign between the roots, each interval belongs to a different region in the quadrature partitioning.

In each interval, $[y_i, y_{i+1}]$, we distribute points according to the 1d-base quadrature, $(x_q, w_q)$, and take the cartesian product with $(x_I, w_I)$ to create the $dim$-dimensional quadrature points, $(X_q, W_q)$: $X_q = x_I \times (y_i + (y_{i+1} - y_i) x_q)$, $W_q = w_I (y_{i+1} - y_i) w_q$.

When $dim=spacedim$, we have a single level set function, $\psi$. Since we have fulfilled the implicit function theorem, there is a single root $y_1 \in [L, R]$. The point, $x_s = x_I \times y_1$, will be added as a point in the surface quadrature. One can show that the correct weight of this point is

$w_s = \frac{\|\nabla \psi(x_s)\|}{|\partial_i \psi(x_s)|} w_I$,

where $i$ is the height function direction.

      Definition at line 896 of file quadrature_generator.h.
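The weight formula can be written down directly. A tiny illustration in plain C++ (dim = spacedim = 3; the inputs are hypothetical):

#include <array>
#include <cmath>

// w_s = |grad psi(x_s)| / |d psi/dx_i (x_s)| * w_I, where i is the
// height function direction and w_I the lower dimensional weight.
double surface_weight(const std::array<double, 3> &grad_psi,
                      const unsigned int           i,
                      const double                 w_I)
{
  const double norm = std::sqrt(grad_psi[0] * grad_psi[0] +
                                grad_psi[1] * grad_psi[1] +
                                grad_psi[2] * grad_psi[2]);
  return norm / std::abs(grad_psi[i]) * w_I;
}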

      Constructor & Destructor Documentation

@@ -210,7 +210,7 @@ QPartitioning< dim > & q_partitioning

Create $dim$-dimensional immersed quadratures from the incoming $(dim-1)$-dimensional quadratures and add these to q_partitioning.

      Definition at line 748 of file quadrature_generator.cc.

      @@ -283,7 +283,7 @@

      Create a surface quadrature point from the lower-dimensional point and add it to surface_quadrature.

This function is only called when $dim=spacedim$ and there is a single level set function. At this point there should only be a single root in the interval $[L, R]$.

      Definition at line 804 of file quadrature_generator.cc.

      @@ -392,7 +392,7 @@
1d-functions that are restrictions of each dim-dimensional level set function passed to generate() to some $(dim-1)$-dimensional point.

      Definition at line 966 of file quadrature_generator.h.

      @@ -446,7 +446,7 @@
The roots of the functions in point_restrictions. This will be the values of the height functions, $\{H_i(x_I)\}$ at some lower dimensional quadrature point, $x_I \in \mathbb{R}^{dim-1}$.

      Definition at line 979 of file quadrature_generator.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html 2024-12-27 18:25:08.004868246 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html 2024-12-27 18:25:08.012868301 +0000 @@ -530,11 +530,11 @@

A function object that users may supply and that is intended to prepare the linear solver for subsequent calls to solve_with_jacobian().

      The job of setup_jacobian() is to prepare the linear solver for subsequent calls to solve_with_jacobian(), in the solution of linear systems $Ax = b$. The exact nature of this system depends on the SolutionStrategy that has been selected.

In the cases strategy = SolutionStrategy::newton or SolutionStrategy::linesearch, $A$ is the Jacobian $J = \partial F/\partial u$. If strategy = SolutionStrategy::picard, $A$ is the approximate Jacobian matrix $L$.

Parameters
current_u	Current value of $u$
      @@ -562,7 +562,7 @@
Parameters
[in]	rhs	The system right hand side to solve for.
[out]	dst	The solution of $J^{-1} * \texttt{src}$.
[in]	tolerance	The tolerance with which to solve the linear system of equations.
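A minimal sketch of wiring these callbacks, assuming deal.II's NonlinearSolverSelector from nonlinear.h; compute_residual, assemble_jacobian, and jacobian_solve are hypothetical user routines, and the residual/reinit_vector hooks are assumed to follow the same std::function pattern as the setup_jacobian and solve_with_jacobian members documented above:

#include <deal.II/lac/vector.h>
#include <deal.II/numerics/nonlinear.h>

using namespace dealii;

// Hypothetical user code: assemble F(u), assemble/factorize A at u, and
// approximately solve A * dst = rhs to the given tolerance.
void compute_residual(const Vector<double> &u, Vector<double> &f);
void assemble_jacobian(const Vector<double> &current_u);
void jacobian_solve(const Vector<double> &rhs, Vector<double> &dst, double tolerance);

void solve_nonlinear(Vector<double> &solution)
{
  NonlinearSolverSelector<Vector<double>> solver;

  solver.reinit_vector = [&](Vector<double> &v) { v.reinit(solution.size()); };

  solver.residual = [](const Vector<double> &u, Vector<double> &f) {
    compute_residual(u, f); // f = F(u)
  };

  // Prepare the linear solver for subsequent solve_with_jacobian() calls,
  // e.g. assemble and factorize A at current_u.
  solver.setup_jacobian = [](const Vector<double> &current_u) {
    assemble_jacobian(current_u);
  };

  solver.solve_with_jacobian =
    [](const Vector<double> &rhs, Vector<double> &dst, const double tolerance) {
      jacobian_solve(rhs, dst, tolerance);
    };

  solver.solve(solution);
}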
/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html 2024-12-27 18:25:08.040868493 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html 2024-12-27 18:25:08.044868520 +0000 @@ -260,7 +260,7 @@
solver_type	Nonlinear solver type.
strategy	Method of solving the nonlinear problem.
maximum_non_linear_iterations	Maximum number of nonlinear iterations.
function_tolerance	Absolute stopping tolerance for the norm of the residual $F(u)$.
relative_tolerance	Relative stopping tolerance.
step_tolerance	Tolerance for minimum scaled step length
anderson_subspace_size	Size of the Anderson acceleration subspace, use 0 to disable.
@@ -343,7 +343,7 @@
A scalar used as a stopping tolerance on the scaled maximum norm of the system function $F(u)$ or $G(u)$.

      If set to zero, default values will be used.

      Definition at line 177 of file nonlinear.h.

      @@ -363,7 +363,7 @@
Relative $l_2$ tolerance of the residual to be reached.

      Note
      Solver terminates successfully if either the function tolerance or the relative tolerance has been reached.

      Definition at line 185 of file nonlinear.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 2024-12-27 18:25:08.084868795 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 2024-12-27 18:25:08.088868822 +0000 @@ -573,7 +573,7 @@
Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

      This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

      Refer to the general documentation of this class for more information.

      @@ -603,24 +603,24 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
  \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
 \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                            -F^{-1}(\mathbf x_1)\right]
\end{align*}

In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
  \mathbf s(t) &= F(\zeta(t))
 \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
 \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                    -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
  \mathbf s'(0) &=
    \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                       + t\left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right]\right)\right|_{t=0}
\\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                   \left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right].
\end{align*}

This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html 2024-12-27 18:25:08.132869125 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html 2024-12-27 18:25:08.136869152 +0000 @@ -487,7 +487,7 @@
x1	The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

      Note
      If you use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold, by overloading the project_to_manifold() function.
      Parameters
      @@ -496,7 +496,7 @@
      -
      Returns
      A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.
      +
      Returns
      A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

      Reimplemented from Manifold< dim, spacedim >.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html 2024-12-27 18:25:08.180869454 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html 2024-12-27 18:25:08.184869482 +0000 @@ -448,7 +448,7 @@
Given a point in the spacedim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the uv coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

      This function is used in the computations required by the get_tangent_vector() function.

      Refer to the general documentation of this class for more information.

      @@ -637,7 +637,7 @@
Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

      This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

      Refer to the general documentation of this class for more information.

      @@ -667,24 +667,24 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line between the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
  \zeta(t) &= \xi_1 + t (\xi_2-\xi_1)
\\         &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                           -F^{-1}(\mathbf x_1)\right]
\end{align*}

In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
  \mathbf s(t) &= F(\zeta(t))
\\             &= F(\xi_1 + t (\xi_2-\xi_1))
\\             &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                    -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
  \mathbf s'(0) &=
    \frac{d}{dt}\left.F\left(F^{-1}(\mathbf x_1)
                             + t\left[F^{-1}(\mathbf x_2)
                                      -F^{-1}(\mathbf x_1)\right]\right)
                \right|_{t=0}
\\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                 \left[F^{-1}(\mathbf x_2)
                       -F^{-1}(\mathbf x_1)\right].
\end{align*}

This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html 2024-12-27 18:25:08.224869756 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html 2024-12-27 18:25:08.228869784 +0000 @@ -481,7 +481,7 @@
x1: The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.

Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

Note
The same applies if you use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold by overloading the project_to_manifold() function.

Parameters
@@ -490,7 +490,7 @@
Returns
A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

      Reimplemented from Manifold< dim, spacedim >.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 2024-12-27 18:25:08.272870086 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 2024-12-27 18:25:08.276870113 +0000 @@ -481,7 +481,7 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

Note
The same applies if you use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold by overloading the project_to_manifold() function.

Parameters
@@ -490,7 +490,7 @@
Returns
A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

      Reimplemented from Manifold< dim, spacedim >.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html 2024-12-27 18:25:08.320870415 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html 2024-12-27 18:25:08.324870443 +0000 @@ -286,7 +286,7 @@

      Detailed Description

      template<typename VectorType>
      class PArpackSolver< VectorType >

      Interface for using PARPACK. PARPACK is a collection of Fortran77 subroutines designed to solve large scale eigenvalue problems. Here we interface to the routines pdneupd, pdseupd, pdnaupd, pdsaupd of PARPACK. The package is designed to compute a few eigenvalues and corresponding eigenvectors of a general n by n matrix A. It is most appropriate for large sparse matrices A.

In this class we make use of the method applied to the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$, where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively.

The PArpackSolver can be used in application codes in the following way:

      const unsigned int num_arnoldi_vectors = 2*size_of_spectrum + 2;
      @@ -311,8 +311,8 @@
      const AdditionalData additional_data
for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable size_of_spectrum tells PARPACK the number of eigenvector/eigenvalue pairs to solve for. Here, lambda is a vector that will contain the eigenvalues computed, x a vector of objects of type V that will contain the eigenvectors computed.

Currently, only three modes of (P)Arpack are implemented. In mode 3 (default), OP is an inverse operation for the matrix A - sigma * B, where sigma is a shift value, set to zero by default. In mode 2, OP is an inverse of M. Finally, mode 1 corresponds to the standard eigenvalue problem without spectral transformation $Ax=\lambda x$. The mode can be specified via the AdditionalData object. Note that for shift-and-invert (mode=3), the sought eigenpairs are those after the spectral transformation is applied.

      The OP can be specified by using a LinearOperator:

      const double shift = 5.0;
      const auto op_A = linear_operator<vector_t>(A);
      const auto op_B = linear_operator<vector_t>(B);
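The diff cuts the snippet off here; a plausible continuation (my sketch, hedged: solver_lin and preconditioner are assumed, already-configured objects) would build the shifted operator and its inverse for mode 3:

// Shift-and-invert operator OP = (A - shift B)^{-1} realized through a
// linear solver; 'solver_lin' (e.g. a SolverGMRES) and 'preconditioner'
// are assumed to be set up elsewhere.
const auto op_shifted = op_A - shift * op_B;
const auto op_inv     = inverse_operator(op_shifted, solver_lin, preconditioner);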
@@ -645,7 +645,7 @@ const unsigned int n_eigenvalues

Solve the generalized eigenspectrum problem $A x=\lambda B x$ by calling the pd(n/s)eupd and pd(n/s)aupd functions of PARPACK.

      In mode=3, inverse should correspond to $[A-\sigma B]^{-1}$, whereas in mode=2 it should represent $B^{-1}$. For mode=1 both B and inverse are ignored.

      Definition at line 769 of file parpack_solver.h.

/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html 2024-12-27 18:25:08.356870663 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html 2024-12-27 18:25:08.360870690 +0000 @@ -304,7 +304,7 @@ const MPI_Comm communicator

      Reinitialization that takes the number of locally-owned degrees of freedom local_size and an index set for the required ghost indices ghost_indices.

The local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively. For example, if process 0 owns 5 elements and process 1 owns 3, then process 0 holds global indices $[0,5)$ and process 1 holds $[5,8)$.

      The export_to_ghost_array will populate an array containing values from locally-owned AND ghost indices, as for the relevant set of dofs of a usual FEM simulation.
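A hedged sketch of this reinitialization (my example: each of two ranks owns 3 consecutive dofs, and rank 1 additionally needs global index 0 as a ghost; my_rank and mpi_communicator are assumed to exist):

// Global size 6 = 2 ranks x 3 locally-owned dofs, owned in rank order.
IndexSet ghost_set(6);
if (my_rank == 1)
  ghost_set.add_index(0); // rank 1 reads a value owned by rank 0
PETScWrappers::CommunicationPattern pattern;
pattern.reinit(/*local_size=*/3, ghost_set, mpi_communicator);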

      Definition at line 49 of file petsc_communication_pattern.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html 2024-12-27 18:25:08.416871075 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html 2024-12-27 18:25:08.420871103 +0000 @@ -1518,8 +1518,8 @@
Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\text{all columns }j}\sum_{\text{all rows }i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm that is compatible with the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      Definition at line 418 of file petsc_matrix_base.cc.

      @@ -1547,8 +1547,8 @@
Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\text{all rows }i}\sum_{\text{all columns }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm that is compatible with the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      Definition at line 431 of file petsc_matrix_base.cc.
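A short worked example of the two norms above (my own illustration, not from the manual): for
\[
  M = \begin{pmatrix} 1 & -2 \\ 3 & 4 \end{pmatrix},
\]
the column sums of absolute values are $1+3=4$ and $2+4=6$, so $|M|_1=6$; the row sums are $1+2=3$ and $3+4=7$, so $|M|_\infty=7$.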

      @@ -1604,7 +1604,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      Obviously, the matrix needs to be quadratic for this operation.

      The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

      Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

      @@ -2351,7 +2351,7 @@

      Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

      Definition at line 644 of file petsc_matrix_base.cc.
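A hedged usage sketch of the multiplication just described (A and B are assumed, already-assembled PETScWrappers matrices of compatible sizes):

PETScWrappers::MPI::SparseMatrix C;
A.mmult(C, B);        // C = A * B; C's size and sparsity pattern are set here
// With a compatible diagonal-scaling vector V:
// A.mmult(C, B, V);  // C = A * diag(V) * B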

      @@ -2385,8 +2385,8 @@
Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

      Definition at line 652 of file petsc_matrix_base.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html 2024-12-27 18:25:08.504871680 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html 2024-12-27 18:25:08.496871625 +0000 @@ -888,7 +888,7 @@
Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

      Definition at line 408 of file petsc_block_sparse_matrix.h.
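A hedged usage sketch (M, dst, src are assumed to be a PETScWrappers::MPI::BlockSparseMatrix and block vectors with a compatible block structure):

PETScWrappers::MPI::BlockVector dst, src;
// ... reinit dst and src to match M's block structure ...
M.vmult(dst, src);   // dst = M * src
M.Tvmult(dst, src);  // dst = M^T * src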

      @@ -1000,7 +1000,7 @@
Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      Definition at line 439 of file petsc_block_sparse_matrix.h.

      @@ -2050,7 +2050,7 @@
Adding matrix-vector multiplication: add $M*src$ to $dst$, with $M$ being this matrix.

      @@ -2624,7 +2624,7 @@
Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

      Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

      @@ -2732,7 +2732,7 @@
Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html 2024-12-27 18:25:08.576872174 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html 2024-12-27 18:25:08.580872201 +0000 @@ -1956,7 +1956,7 @@
$U = U * V$: scalar product.

      @@ -1982,7 +1982,7 @@
Return the square of the $l_2$-norm.

      @@ -2034,7 +2034,7 @@
Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

      @@ -2060,7 +2060,7 @@
Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

      @@ -2086,7 +2086,7 @@
Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

      @@ -2121,7 +2121,7 @@
this->add(a, V);
return_value = *this * W;

      The reason this function exists is that this operation involves less memory transfer than calling the two functions separately on deal.II's vector classes (Vector<Number> and LinearAlgebra::distributed::Vector<double>). This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.
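A hedged sketch of the combined operation this paragraph describes (assuming it documents an add_and_dot()-style member, with U, V, W compatible vectors and a a scalar):

// Equivalent to:  U.add(a, V);  result = U * W;  but with one pass over
// memory instead of two.
const PetscScalar result = U.add_and_dot(a, V, W);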

      @@ -2368,7 +2368,7 @@
$U(0-DIM)+=s$. Addition of $s$ to all components. Note that $s$ is a scalar and not a vector.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html 2024-12-27 18:25:08.652872696 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html 2024-12-27 18:25:08.656872723 +0000 @@ -827,7 +827,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v^\ast,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      Obviously, the matrix needs to be quadratic for this operation.

      The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

@@ -850,7 +850,7 @@ const Vector & v

Compute the matrix scalar product $\left(u^\ast,Mv\right)$.

      The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

      Definition at line 814 of file petsc_parallel_sparse_matrix.cc.

      @@ -2072,8 +2072,8 @@
Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\text{all columns }j}\sum_{\text{all rows }i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm that is compatible with the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      Definition at line 418 of file petsc_matrix_base.cc.

      @@ -2101,8 +2101,8 @@
Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\text{all rows }i}\sum_{\text{all columns }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm that is compatible with the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      Definition at line 431 of file petsc_matrix_base.cc.

      @@ -2158,7 +2158,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      Obviously, the matrix needs to be quadratic for this operation.

      The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

      Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

      @@ -2905,7 +2905,7 @@

      Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

      Definition at line 644 of file petsc_matrix_base.cc.

      @@ -2939,8 +2939,8 @@
Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

      Definition at line 652 of file petsc_matrix_base.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html 2024-12-27 18:25:08.724873190 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html 2024-12-27 18:25:08.728873218 +0000 @@ -1941,7 +1941,7 @@
Return the square of the $l_2$-norm.

      Definition at line 604 of file petsc_vector_base.cc.

      @@ -1997,7 +1997,7 @@
$l_1$-norm of the vector. The sum of the absolute values.

      Note
In complex-valued PETSc prior to 3.7.0 this norm is implemented as the sum of absolute values of the real and imaginary parts of the elements of a complex vector.

      Definition at line 664 of file petsc_vector_base.cc.

      @@ -2026,7 +2026,7 @@
$l_2$-norm of the vector. The square root of the sum of the squares of the elements.

      Definition at line 677 of file petsc_vector_base.cc.

      @@ -2054,7 +2054,7 @@
$l_p$-norm of the vector. The $p$th root of the sum of the $p$th powers of the absolute values of the elements.

      Definition at line 690 of file petsc_vector_base.cc.

      @@ -2082,7 +2082,7 @@
$l_\infty$-norm of the vector. Return the value of the vector element with the maximum absolute value.

      Definition at line 732 of file petsc_vector_base.cc.

      @@ -2119,7 +2119,7 @@
this->add(a, V);
return_value = *this * W;

      The reason this function exists is for compatibility with deal.II's own vector classes which can implement this functionality with less memory transfer. However, for PETSc vectors such a combined operation is not natively supported and thus the cost is completely equivalent as calling the two methods separately.

For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      Definition at line 529 of file petsc_vector_base.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 2024-12-27 18:25:08.784873602 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 2024-12-27 18:25:08.788873630 +0000 @@ -1303,8 +1303,8 @@
Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\text{all columns }j}\sum_{\text{all rows }i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm that is compatible with the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      Definition at line 418 of file petsc_matrix_base.cc.

      @@ -1324,8 +1324,8 @@
Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\text{all rows }i}\sum_{\text{all columns }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm that is compatible with the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      Definition at line 431 of file petsc_matrix_base.cc.

      @@ -1365,7 +1365,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      Obviously, the matrix needs to be quadratic for this operation.

      The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

      Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

      @@ -1972,7 +1972,7 @@

      Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

      Definition at line 644 of file petsc_matrix_base.cc.

      @@ -2006,8 +2006,8 @@
Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

      Definition at line 652 of file petsc_matrix_base.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 2024-12-27 18:25:08.864874151 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 2024-12-27 18:25:08.868874179 +0000 @@ -1962,8 +1962,8 @@
Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\text{all columns }j}\sum_{\text{all rows }i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm that is compatible with the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      Definition at line 418 of file petsc_matrix_base.cc.

      @@ -1991,8 +1991,8 @@
Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\text{all rows }i}\sum_{\text{all columns }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm that is compatible with the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      Definition at line 431 of file petsc_matrix_base.cc.

      @@ -2048,7 +2048,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      Obviously, the matrix needs to be quadratic for this operation.

      The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

      Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

      @@ -2675,7 +2675,7 @@

      Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

      Definition at line 644 of file petsc_matrix_base.cc.

      @@ -2709,8 +2709,8 @@
Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

      Definition at line 652 of file petsc_matrix_base.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html 2024-12-27 18:25:08.900874399 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html 2024-12-27 18:25:08.904874426 +0000 @@ -194,7 +194,7 @@
      Mat & petsc_matrix();
      ...

      In particular, the supported types are the ones that can wrap PETSc's native vector and matrix classes, that are able to modify them in place, and that can return PETSc native types when requested.

To use the solvers the user needs to provide the implementation of $F$ via the NonlinearSolver::residual callback.
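A minimal wiring sketch (hedged: data, mpi_communicator, solution, and assemble_residual() are hypothetical placeholders, and the callback signature may vary between deal.II releases):

PETScWrappers::NonlinearSolver<PETScWrappers::MPI::Vector> solver(data, mpi_communicator);
solver.residual = [&](const PETScWrappers::MPI::Vector &x,
                      PETScWrappers::MPI::Vector       &res) {
  assemble_residual(x, res); // evaluate F(x) into res
};
solver.solve(solution);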

      The default linearization procedure of a solver instantiated with this class consists in using Jacobian-Free-Newton-Krylov; the action of tangent matrices inside a linear solver process are approximated via matrix-free finite-differencing of the nonlinear residual equations. For details, consult the PETSc manual.

      Users can also provide the implementations of the Jacobian. This can be accomplished in two ways:

      • PETSc style using NonlinearSolver::jacobian
• deal.II style using NonlinearSolver::setup_jacobian and NonlinearSolver::solve_with_jacobian

/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html 2024-12-27 18:25:08.972874893 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html 2024-12-27 18:25:08.980874948 +0000 @@ -1952,8 +1952,8 @@
Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\text{all columns }j}\sum_{\text{all rows }i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm that is compatible with the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

        Definition at line 418 of file petsc_matrix_base.cc.

        @@ -1981,8 +1981,8 @@
Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\text{all rows }i}\sum_{\text{all columns }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm that is compatible with the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

        Definition at line 431 of file petsc_matrix_base.cc.

        @@ -2038,7 +2038,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

        Obviously, the matrix needs to be quadratic for this operation.

        The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

        Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

        @@ -2785,7 +2785,7 @@

        Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

        The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

        Definition at line 644 of file petsc_matrix_base.cc.

        @@ -2819,8 +2819,8 @@
Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

        The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

        Definition at line 652 of file petsc_matrix_base.cc.

        /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html 2024-12-27 18:25:09.028875278 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html 2024-12-27 18:25:09.032875305 +0000 @@ -216,20 +216,20 @@
        template<typename VectorType = PETScWrappers::VectorBase, typename PMatrixType = PETScWrappers::MatrixBase, typename AMatrixType = PMatrixType>
        class PETScWrappers::TimeStepper< VectorType, PMatrixType, AMatrixType >

        Interface to the PETSc TS solver for Ordinary Differential Equations and Differential-Algebraic Equations. The TS solver is described in the PETSc manual. This class is used and extensively discussed in step-86.

        This class supports two kinds of formulations. The explicit formulation:

\[
  \begin{cases}
      \dot y = G(t,y)\, , \\
      y(t_0) = y_0\, , \\
  \end{cases}
\]

        and the implicit formulation:

\[
  \begin{cases}
      F(t,y,\dot y) = 0\, , \\
      y(t_0) = y_0\, . \\
  \end{cases}
\]

The interface to PETSc is realized by means of std::function callbacks like in the SUNDIALS::IDA class (which also solves implicit ODEs) and the SUNDIALS::ARKode class (which solves a slightly generalized form of the explicit formulation above that also allows for a mass matrix on the left hand side).

        TimeStepper supports any vector and matrix type having constructors and methods:

        @@ -247,7 +247,7 @@
        Mat & petsc_matrix();
        ...

        In particular, the supported types are the ones that can wrap PETSc's native vector and matrix classes, that are able to modify them in place, and that can return PETSc native types when requested.

To use explicit solvers (like for example explicit Runge-Kutta methods), the user only needs to provide the implementation of $G$ via TimeStepper::explicit_function. For implicit solvers, users also have the alternative of providing the $F$ function via TimeStepper::implicit_function. IMEX methods are also supported by providing both callbacks.

The default linearization procedure of an implicit solver instantiated with this class consists in using Jacobian-Free Newton-Krylov; the action of tangent matrices inside a linear solver process is approximated via matrix-free finite-differencing of the nonlinear residual equations that are ODE-solver specific. For details, consult the PETSc manual.

        Users can also provide the implementations of the Jacobians. This can be accomplished in two ways:

Callback for the computation of the implicit Jacobian $\dfrac{\partial F}{\partial y} + \alpha \dfrac{\partial F}{\partial \dot y}$.

All implicit solver implementations are recast to use the above linearization. The $\alpha$ parameter is time-step and solver-type specific.

        Note
        This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions.

        Definition at line 501 of file petsc_ts.h.
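A minimal sketch of the callback-based interface described above, assuming the std::function members named on this page and a simplified signature for implicit_function (consult step-86 for a complete, authoritative example):

    PETScWrappers::TimeStepperData data;                   // run-time parameters
    PETScWrappers::TimeStepper<VectorType> stepper(data);
    // Implicit form F(t, y, y') = 0: fill the residual in 'res'.
    stepper.implicit_function =
      [&](const double t, const VectorType &y, const VectorType &y_dot, VectorType &res)
      { /* res = F(t, y, y_dot) */ };
    stepper.solve(y);  // integrate; JFNK is used unless Jacobian callbacks are set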

        @@ -788,7 +788,7 @@

Callback for the setup of the Jacobian system.

This callback gives full control to users to set up the linearized equations $\dfrac{\partial F}{\partial y} + \alpha \dfrac{\partial F}{\partial \dot y}$.

All implicit solver implementations are recast to use the above linearization. The $\alpha$ parameter is time-step and solver-type specific.

        Solvers must be provided via TimeStepper::solve_with_jacobian.

        Note
        This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions.
        /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html 2024-12-27 18:25:09.092875717 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html 2024-12-27 18:25:09.096875745 +0000 @@ -1169,7 +1169,7 @@
Return the square of the $l_2$-norm.

        Definition at line 604 of file petsc_vector_base.cc.

        @@ -1209,7 +1209,7 @@
$l_1$-norm of the vector. The sum of the absolute values.

        Note
In complex-valued PETSc prior to 3.7.0 this norm is implemented as the sum of absolute values of the real and imaginary parts of the elements of a complex vector.

        Definition at line 664 of file petsc_vector_base.cc.

        @@ -1230,7 +1230,7 @@
$l_2$-norm of the vector. The square root of the sum of the squares of the elements.

        Definition at line 677 of file petsc_vector_base.cc.

        @@ -1250,7 +1250,7 @@
$l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

        Definition at line 690 of file petsc_vector_base.cc.

        @@ -1270,7 +1270,7 @@
$l_\infty$-norm of the vector. Return the value of the vector element with the maximum absolute value.

        Definition at line 732 of file petsc_vector_base.cc.
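Taken together, the norm members documented above can be used as in this small sketch (vector setup elided):

    PETScWrappers::MPI::Vector v;
    // ... reinit and fill v ...
    const PetscReal s2 = v.norm_sqr();     // square of the l2-norm
    const PetscReal n1 = v.l1_norm();      // sum of absolute values
    const PetscReal n2 = v.l2_norm();      // sqrt of the sum of squares
    const PetscReal np = v.lp_norm(3.0);   // p-th root of the sum of p-th powers
    const PetscReal ni = v.linfty_norm();  // largest absolute entry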

        @@ -1300,7 +1300,7 @@
        return_value = *this * W;
        void add(const std::vector< size_type > &indices, const std::vector< PetscScalar > &values)

The reason this function exists is for compatibility with deal.II's own vector classes, which can implement this functionality with less memory transfer. However, for PETSc vectors such a combined operation is not natively supported, and thus the cost is completely equivalent to calling the two methods separately.

For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

        Definition at line 529 of file petsc_vector_base.cc.
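In other words, the combined operation performs the following two steps; a sketch, assuming the add_and_dot member this page documents:

    // Step 1: v += a * V
    v.add(a, V);
    // Step 2: scalar product with W (second argument conjugated for complex scalars)
    const PetscScalar result = v * W;
    // Equivalent single call:
    // const PetscScalar result = v.add_and_dot(a, V, W);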

/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html 2024-12-27 18:25:09.244876761 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html 2024-12-27 18:25:09.248876788 +0000 @@ -1057,7 +1057,7 @@
const typename Triangulation< dim, spacedim >::active_cell_iterator &cell

Insert a particle into the collection of particles. Return an iterator to the new position of the particle. This function involves a copy of the particle and its properties. Note that this function is of $O(N \log N)$ complexity for $N$ particles.

        Definition at line 578 of file particle_handler.cc.
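A hedged usage sketch (location, reference_location, id, and cell are placeholders for values the caller already has; treat the constructor arguments as illustrative):

    Particles::ParticleHandler<dim> particle_handler(triangulation, mapping);
    Particles::Particle<dim>        particle(location, reference_location, id);
    // O(N log N) insertion; the particle and its properties are copied:
    auto it = particle_handler.insert_particle(particle, cell);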

        /usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html 2024-12-27 18:25:09.284877035 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html 2024-12-27 18:25:09.292877090 +0000 @@ -677,7 +677,7 @@
This function makes sure that all internally stored memory blocks are sorted in the same order as one would loop over the handles_to_sort container. This makes sure that memory access is contiguous with the actual memory location. Because the ordering is given in the input argument, the complexity of this function is $O(N)$ where $N$ is the number of elements in the input argument.

        Definition at line 191 of file property_pool.cc.

        /usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html 2024-12-27 18:25:09.468878299 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html 2024-12-27 18:25:09.472878326 +0000 @@ -2225,7 +2225,7 @@
Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

        The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

        Note
This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e., more than once if times > 1). See the section on signals in the general documentation of this class.
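For illustration, a small sketch of the refinement loop this paragraph describes, using a plain Triangulation:

    Triangulation<2> tria;
    GridGenerator::hyper_cube(tria);
    tria.refine_global(2);  // two cycles: each cell splits into 2^dim children per cycle
    // The number of cells grew by (2^dim)^times = 4^2 = 16 in 2d.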
        @@ -6947,7 +6947,7 @@
Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e., for $\text{dim}>2$, the level argument must not be given.

        Note
        The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
        /usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html 2024-12-27 18:25:09.536878766 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html 2024-12-27 18:25:09.540878793 +0000 @@ -879,7 +879,7 @@
Return the Euclidean distance of this point to the point p, i.e. the $l_2$ norm of the difference between the vectors representing the two points.

        Note
        This function can also be used in device code.
        @@ -1491,7 +1491,7 @@
Return an unrolled index in the range $[0,\text{dim}^{\text{rank}}-1]$ for the element of the tensor indexed by the argument to the function.

        @@ -1517,7 +1517,7 @@
Opposite of component_to_unrolled_index: For an index in the range $[0, \text{dim}^{\text{rank}}-1]$, return which set of indices it would correspond to.

        @@ -2168,11 +2168,11 @@

        Entrywise multiplication of two tensor objects of general rank.

This multiplication is also called "Hadamard-product" (cf. https://en.wikipedia.org/wiki/Hadamard_product_(matrices)), and generates a new tensor of size <rank, dim>:

\[
  \text{result}_{i, j}
  = \text{left}_{i, j}\circ
    \text{right}_{i, j}
\]

        Template Parameters
        @@ -2207,17 +2207,17 @@
The dot product (single contraction) for tensors. This function returns a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

\[
  \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
  = \sum_{k}
    \text{left}_{i_1,\ldots,i_{r1}, k}
    \text{right}_{k, j_1,\ldots,j_{r2}}
\]

        Note
        For the Tensor class, the multiplication operator only performs a contraction over a single pair of indices. This is in contrast to the multiplication operator for SymmetricTensor, for which the corresponding operator*() performs a double contraction. The origin of the difference in how operator*() is implemented between Tensor and SymmetricTensor is that for the former, the product between two Tensor objects of same rank and dimension results in another Tensor object – that it, operator*() corresponds to the multiplicative group action within the group of tensors. On the other hand, there is no corresponding multiplicative group action with the set of symmetric tensors because, in general, the product of two symmetric tensors is a nonsymmetric tensor. As a consequence, for a mathematician, it is clear that operator*() for symmetric tensors must have a different meaning: namely the dot or scalar product that maps two symmetric tensors of rank 2 to a scalar. This corresponds to the double-dot (colon) operator whose meaning is then extended to the product of any two even-ranked symmetric tensors.
In case the contraction yields a tensor of rank 0, that is, if rank_1==rank_2==1, then a scalar number is returned as an unwrapped number type.

Return the $l_1$ norm of the given rank-2 tensor, where $\|\mathbf T\|_1 = \max_j \sum_i |T_{ij}|$ (maximum of the sums over columns).

Definition at line 3039 of file tensor.h.

    @@ -2245,7 +2245,7 @@
Return the $l_\infty$ norm of the given rank-2 tensor, where $\|\mathbf T\|_\infty = \max_i \sum_j |T_{ij}|$ (maximum of the sums over rows).

    Definition at line 3065 of file tensor.h.
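A short sketch combining the single contraction and the two matrix norms documented here:

    Tensor<2, 3> T;
    Tensor<1, 3> v;
    // ... fill T and v ...
    const Tensor<1, 3> w    = T * v;           // contracts T's last index with v
    const double       n1   = l1_norm(T);      // max_j sum_i |T_ij| (columns)
    const double       ninf = linfty_norm(T);  // max_i sum_j |T_ij| (rows)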

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html 2024-12-27 18:25:09.584879095 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html 2024-12-27 18:25:09.592879150 +0000 @@ -458,7 +458,7 @@
Given a point in the spacedim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the polar coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{spacedim}$.

    This function is used in the computations required by the get_tangent_vector() function.

    Refer to the general documentation of this class for more information.

    @@ -716,7 +716,7 @@
Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -748,24 +748,24 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
  \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
\\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                           -F^{-1}(\mathbf x_1)\right]
\end{align*}

    In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
  \mathbf s(t) &= F(\zeta(t))
\\          &= F(\xi_1 +  t (\xi_2-\xi_1))
\\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                    -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
  \mathbf s'(0) &=
    \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                       + t\left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right]\right)\right|_{t=0}
\\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                   \left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right].
\end{align*}

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
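A hedged sketch of asking a PolarManifold for the tangent vector $\mathbf s'(0)$ described above (a default-constructed manifold centers the polar coordinates at the origin):

    const PolarManifold<2> manifold;  // center defaults to the origin
    const Point<2> x1(1.0, 0.0), x2(0.0, 1.0);
    const Tensor<1, 2> t = manifold.get_tangent_vector(x1, x2);  // = s'(0)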
    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html 2024-12-27 18:25:09.620879343 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html 2024-12-27 18:25:09.628879397 +0000 @@ -156,7 +156,7 @@
    template<int dim>
    class PolynomialsBernardiRaugel< dim >

    This class implements the Bernardi-Raugel polynomials similarly to the description in the Mathematics of Computation paper from 1985 by Christine Bernardi and Geneviève Raugel.

    The Bernardi-Raugel polynomials are originally defined as an enrichment of the $(P_1)^d$ elements on simplicial meshes for Stokes problems by the addition of bubble functions, yielding a locking-free finite element which is a subset of $(P_2)^d$ elements. This implementation is an enrichment of $(Q_1)^d$ elements which is a subset of $(Q_2)^d$ elements for quadrilateral and hexahedral meshes.

The $BR_1$ bubble functions are defined to have magnitude 1 at the center of face $e_i$ and direction $\mathbf{n}_i$ normal to face $e_i$, and magnitude 0 on all other vertices and faces. Ordering is consistent with the face numbering in GeometryInfo. The vector $\mathbf{n}_i$ points in the positive axis direction and not necessarily normal to the element for consistent orientation across edges.

    2d bubble functions (in order)

    $x=0$ edge: $\mathbf{p}_1 = \mathbf{n}_1 (1-x)(y)(1-y)$

$x=1$ edge: $\mathbf{p}_2 = \mathbf{n}_2 (x)(y)(1-y)$
    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html differs (HTML document, ASCII text, with very long lines)
    --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html	2024-12-27 18:25:09.664879645 +0000
    +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html	2024-12-27 18:25:09.664879645 +0000
    @@ -1245,7 +1245,7 @@
       
     
x1: The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.
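A sketch of constructing a polynomial directly in this Lagrange product form, assuming the Polynomials::Polynomial constructor that takes the support points and the index of the point where the polynomial equals one:

    std::vector<Point<1>> support = {Point<1>(0.0), Point<1>(0.5), Point<1>(1.0)};
    // Lagrange polynomial that is 1 at support[1] and 0 at the other points;
    // internally stored via the shifts x_i and the scaling 1/c:
    Polynomials::Polynomial<double> p(support, 1);
    const double value = p.value(0.25);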

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsRT__Bubbles.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsRT__Bubbles.html 2024-12-27 18:25:09.700879892 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsRT__Bubbles.html 2024-12-27 18:25:09.700879892 +0000 @@ -152,18 +152,18 @@

This space is of the form $V_k = RT_{k-1} + B_k$, where $B_k$ is defined as follows:

    In 2d:

\begin{align*}
  B_k^1(E) = \text{span}\left\{x^{a_1-1} y^{a_2}\begin{pmatrix} (a_2+1) x \\
    -a_1 y \end{pmatrix}\text{ : } a_2=k \right\} \\
  B_k^2(E) = \text{span}\left\{x^{b_1} y^{b_2-1}\begin{pmatrix} -b_2 x \\
     (b_1+1) y \end{pmatrix}\text{ : } b_1=k \right\}
\end{align*}

    In 3d:

\begin{align*}
   B_k^1(E) = \text{span}\left\{x^{a_1-1} y^{a_2} z^{a_3}\begin{pmatrix}
 (a_2+a_3+2) x \\
     -a_1 y \\ -a_1 z \end{pmatrix}\text{ : } a_2=k \text{ or } a_3=k
@@ -175,11 +175,11 @@
   B_k^3(E) = \text{span}\left\{x^{c_1}y^{c_2}z^{c_3-1}\begin{pmatrix} -c_3 x
 \\ -c_3y \\ (c_1+c_2+2)z \end{pmatrix}\text{ : } c_1=k \text{ or } c_2=k
 \right\},
\end{align*}

where $0 \le a_1, a_2, a_3 \le k$.

    Note
    Unlike the classical Raviart-Thomas space, the lowest order for the enhanced space is 1, similarly to the Brezzi-Douglas-Marini (BDM) polynomial space.

The total dimension of the space is $\dim(V_k) = d(k+1)^d$, where $d$ is the space dimension. This allows one to associate shape functions with the Gauss-Lobatto quadrature points, as shown in the figures below.

    @@ -190,7 +190,7 @@

Left - $2d,\,k=3$, right - $3d,\,k=2$.

    Definition at line 90 of file polynomials_rt_bubbles.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html 2024-12-27 18:25:09.744880194 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html 2024-12-27 18:25:09.748880221 +0000 @@ -1197,7 +1197,7 @@
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 2024-12-27 18:25:09.792880524 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 2024-12-27 18:25:09.796880551 +0000 @@ -1213,7 +1213,7 @@
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html 2024-12-27 18:25:09.840880853 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html 2024-12-27 18:25:09.840880853 +0000 @@ -1311,7 +1311,7 @@
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html 2024-12-27 18:25:09.880881128 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html 2024-12-27 18:25:09.884881155 +0000 @@ -1226,7 +1226,7 @@
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html 2024-12-27 18:25:09.932881485 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html 2024-12-27 18:25:09.928881457 +0000 @@ -1187,7 +1187,7 @@
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html 2024-12-27 18:25:09.968881732 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html 2024-12-27 18:25:09.972881760 +0000 @@ -232,7 +232,7 @@

    Detailed Description

    Lobatto polynomials of arbitrary degree on [0,1].

These polynomials are the integrated Legendre polynomials on [0,1]. The first two polynomials are the standard linear shape functions given by $l_0(x) = 1-x$ and $l_1(x) = x$. For $i\geq2$ we use the definition $l_i(x) = \frac{1}{\Vert L_{i-1}\Vert_2}\int_0^x L_{i-1}(t)\,dt$, where $L_i$ denotes the $i$-th Legendre polynomial on $[0,1]$. The Lobatto polynomials $l_0,\ldots,l_k$ form a complete basis of the polynomials space of degree $k$.

Calling the constructor with a given index k will generate the polynomial with index k. But only for $k\geq 1$ does the index equal the degree of the polynomial; for k==0, a polynomial of degree 1 is generated as well.

    These polynomials are used for the construction of the shape functions of Nédélec elements of arbitrary order.
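A sketch of generating these polynomials, assuming the generate_complete_basis static helper that the Polynomials classes commonly provide:

    // Basis l_0, ..., l_3 of the polynomial space of degree 3:
    const std::vector<Polynomials::Polynomial<double>> lobatto =
      Polynomials::Lobatto::generate_complete_basis(3);
    const double v = lobatto[2].value(0.5);  // l_2 evaluated at x = 1/2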

    @@ -1217,7 +1217,7 @@
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html 2024-12-27 18:25:10.016882062 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html 2024-12-27 18:25:10.024882117 +0000 @@ -1290,7 +1290,7 @@
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html 2024-12-27 18:25:10.068882419 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html 2024-12-27 18:25:10.072882446 +0000 @@ -1217,7 +1217,7 @@
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html 2024-12-27 18:25:10.116882748 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html 2024-12-27 18:25:10.120882776 +0000 @@ -1210,7 +1210,7 @@
The order of the highest derivative in which the Hermite basis can be used to impose continuity across element boundaries. It's related to the degree $p$ by $p = 2 \times\mathtt{regularity} +1$.

    Definition at line 131 of file polynomials_hermite.h.

    @@ -1336,7 +1336,7 @@
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRelaxation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRelaxation.html 2024-12-27 18:25:10.152882995 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRelaxation.html 2024-12-27 18:25:10.160883050 +0000 @@ -226,7 +226,7 @@

\[
 x^{n+1} = x^{n} + \alpha P^{-1} (b-Ax^n).
\]

The relaxation parameter $\alpha$ has to be in the range:

    \[
  0 < \alpha < \frac{2}{\lambda_{\max}(P^{-1}A)}.
\]

/usr/share/doc/packages/dealii/doxygen/deal.II/classQDuffy.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQDuffy.html 2024-12-27 18:25:10.188883243 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQDuffy.html 2024-12-27 18:25:10.196883298 +0000 @@ -234,8 +234,8 @@

\[
 x = v_0 + B \hat x
\]

where the matrix $B$ is given by $B_{ij} = v[j][i]-v[0][i]$.

The weights are scaled with the absolute value of the determinant of $B$, that is $J \dealcoloneq |\text{det}(B)|$. If $J$ is zero, an empty quadrature is returned. This may happen, in two dimensions, if the three vertices are aligned, or in three dimensions if the four vertices are on the same plane. The present function works also in the codimension one and codimension two case. For instance, when dim=2 and spacedim=3, we can map the quadrature points so that they live on the physical triangle embedded in the three dimensional space. In such a case, the matrix $B$ is not square anymore.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html 2024-12-27 18:25:10.220883462 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html 2024-12-27 18:25:10.220883462 +0000 @@ -122,7 +122,7 @@
[in] vertices: The vertices of the simplex you wish to integrate on
     QGaussChebyshev (const unsigned int n)
    &#href_anchor"details" id="details">

    Detailed Description

    template<int dim>
class QGaussChebyshev< dim >

Gauss-Chebyshev quadrature rules integrate the weighted product $\int_{-1}^1 f(x) w(x) dx$ with weight given by: $w(x) = 1/\sqrt{1-x^2}$. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-1$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$. So the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight: $w(x) = 1/\sqrt{x(1-x)}$. For details see: M. Abramowitz & I.A. Stegun: Handbook of Mathematical Functions, par. 25.4.38

    Definition at line 558 of file quadrature_lib.h.
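As a usage sketch, the weighted integral $\int_0^1 f(x) w(x)\,dx$ is approximated by summing $w_i f(q_i)$ over the quadrature points (f stands for a user-supplied smooth function):

    QGaussChebyshev<1> quad(5);  // exact for monomials up to degree 2n-1 = 9
    double integral = 0.0;
    for (unsigned int q = 0; q < quad.size(); ++q)
      integral += quad.weight(q) * f(quad.point(q)[0]);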

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html 2024-12-27 18:25:10.240883600 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html 2024-12-27 18:25:10.244883627 +0000 @@ -123,7 +123,7 @@ class QGaussLobatto< dim >

    The Gauss-Lobatto family of quadrature rules for numerical integration.

    This modification of the Gauss quadrature uses the two interval end points as well. Being exact for polynomials of degree 2n-3, this formula is suboptimal by two degrees.

The quadrature points are interval end points plus the roots of the derivative of the Legendre polynomial $P_{n-1}$ of degree $n-1$. The quadrature weights are $2/(n(n-1)[P_{n-1}(x_i)]^2)$.

Note
This implementation has not been optimized concerning numerical stability and efficiency. It can be easily adapted to the general case of Gauss-Lobatto-Jacobi-Bouzitat quadrature with arbitrary parameters $\alpha$, $\beta$, of which the Gauss-Lobatto-Legendre quadrature ($\alpha = \beta = 0$) is a special case.
    See also
    http://en.wikipedia.org/wiki/Handbook_of_Mathematical_Functions
    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html 2024-12-27 18:25:10.264883765 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html 2024-12-27 18:25:10.268883792 +0000 @@ -122,7 +122,7 @@
     QGaussLobattoChebyshev (const unsigned int n)
    &#href_anchor"details" id="details">

    Detailed Description

    template<int dim>
class QGaussLobattoChebyshev< dim >

Gauss-Lobatto-Chebyshev quadrature rules integrate the weighted product $\int_{-1}^1 f(x) w(x) dx$ with weight given by: $w(x) = 1/\sqrt{1-x^2}$, with the additional constraint that two of the quadrature points are located at the endpoints of the quadrature interval. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-3$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$. So the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight: $w(x) = 1/\sqrt{x(1-x)}$. For details see: M. Abramowitz & I.A. Stegun: Handbook of Mathematical Functions, par. 25.4.40

    Definition at line 627 of file quadrature_lib.h.

    Constructor & Destructor Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html 2024-12-27 18:25:10.292883957 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html 2024-12-27 18:25:10.296883984 +0000 @@ -132,8 +132,8 @@
    &#href_anchor"memitem:a9a003e3342b551507a0bab3fee019e40" id="r_a9a003e3342b551507a0bab3fee019e40">static std::vector< double > get_quadrature_weights (const unsigned int n)
    &#href_anchor"details" id="details">

    Detailed Description

    template<int dim>
class QGaussLog< dim >

A class for Gauss quadrature with logarithmic weighting function. This formula is used to integrate $\ln|x|\;f(x)$ on the interval $[0,1]$, where $f$ is a smooth function without singularities. The collection of quadrature points and weights has been obtained using Numerical Recipes.

Notice that only the function $f(x)$ should be provided, i.e., $\int_0^1 f(x) \ln|x| dx = \sum_{i=0}^N w_i f(q_i)$. Setting the revert flag to true at construction time switches the weight from $\ln|x|$ to $\ln|1-x|$.

    The weights and functions have been tabulated up to order 12.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html 2024-12-27 18:25:10.320884149 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html 2024-12-27 18:25:10.324884177 +0000 @@ -128,15 +128,15 @@

    Detailed Description

    template<int dim>
class QGaussLogR< dim >

A class for Gauss quadrature with arbitrary logarithmic weighting function. This formula is used to integrate $\ln(|x-x_0|/\alpha)\;f(x)$ on the interval $[0,1]$, where $f$ is a smooth function without singularities, and $x_0$ and $\alpha$ are given at construction time, and are the location of the singularity $x_0$ and an arbitrary scaling factor in the singularity.

You have to make sure that the point $x_0$ is not one of the Gauss quadrature points of order $N$, otherwise an exception is thrown, since the quadrature weights cannot be computed correctly.

    This quadrature formula is rather expensive, since it uses internally two Gauss quadrature formulas of order n to integrate the nonsingular part of the factor, and two GaussLog quadrature formulas to integrate on the separate segments $[0,x_0]$ and $[x_0,1]$. If the singularity is one of the extremes and the factor alpha is 1, then this quadrature is the same as QGaussLog.

    The last argument from the constructor allows you to use this quadrature rule in one of two possible ways:

    \[ \int_0^1 g(x) dx = \int_0^1 f(x)
 \ln\left(\frac{|x-x_0|}{\alpha}\right) dx = \sum_{i=0}^N w_i g(q_i) =
 \sum_{i=0}^N \bar{w}_i f(q_i) \]

Which one of the two sets of weights is provided can be selected by the factor_out_singular_weight parameter. If it is false (the default), then the $\bar{w}_i$ weights are computed, and you should provide only the smooth function $f(x)$, since the singularity is included inside the quadrature. If the parameter is set to true, then the singularity is factored out of the quadrature formula, and you should provide a function $g(x)$, which should at least be similar to $\ln(|x-x_0|/\alpha)$.

    Notice that this quadrature rule is worthless if you try to use it for regular functions once you factored out the singularity.

    The weights and functions have been tabulated up to order 12.
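A hedged sketch with the singularity at $x_0=0.3$ and the default mode, in which the $\bar w_i$ weights absorb the logarithm so that only the smooth user-supplied $f$ is evaluated:

    QGaussLogR<1> quad(10, Point<1>(0.3), /*alpha=*/1.0, /*factor_out_singular_weight=*/false);
    double I = 0.0;
    for (unsigned int q = 0; q < quad.size(); ++q)
      I += quad.weight(q) * f(quad.point(q)[0]);  // ~ \int_0^1 f(x) ln(|x-x_0|/alpha) dx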

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html 2024-12-27 18:25:10.356884396 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html 2024-12-27 18:25:10.364884451 +0000 @@ -132,9 +132,9 @@ static unsigned int&#href_anchor"memItemRight" valign="bottom">quad_size (const Point< dim > &singularity, const unsigned int n) &#href_anchor"details" id="details">

    Detailed Description

    template<int dim>
class QGaussOneOverR< dim >

A class for Gauss quadrature with $1/R$ weighting function. This formula can be used to integrate $1/R \ f(x)$ on the reference element $[0,1]^2$, where $f$ is a smooth function without singularities, and $R$ is the distance from the point $x$ to the vertex $\xi$, given at construction time by specifying its index. Notice that this distance is evaluated in the reference element.

This quadrature formula is obtained from two QGauss quadrature formulas, upon transforming them into polar coordinate system centered at the singularity, and then again into another reference element. This allows for the singularity to be cancelled by part of the Jacobian of the transformation, which contains $R$. In practice the reference element is transformed into a triangle by collapsing one of the sides adjacent to the singularity. The Jacobian of this transformation contains $R$, which is removed before scaling the original quadrature, and this process is repeated for the next half element.

Upon construction it is possible to specify whether we want the singularity removed, or not. In other words, this quadrature can be used to integrate $g(x) = 1/R\ f(x)$, or simply $f(x)$, with the $1/R$ factor already included in the quadrature weights.

    Definition at line 356 of file quadrature_lib.h.

    Constructor & Destructor Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html 2024-12-27 18:25:10.388884616 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html 2024-12-27 18:25:10.392884643 +0000 @@ -139,7 +139,7 @@

    Detailed Description

    template<int dim>
class QGaussRadauChebyshev< dim >

Gauss-Radau-Chebyshev quadrature rules integrate the weighted product $\int_{-1}^1 f(x) w(x) dx$ with weight given by: $w(x) = 1/\sqrt{1-x^2}$ with the additional constraint that a quadrature point lies at one of the two extrema of the interval. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-2$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$. So the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight: $w(x) = 1/\sqrt{x(1-x)}$. By default the quadrature is constructed with the left endpoint as quadrature node, but the quadrature node can be imposed at the right endpoint through the variable ep that can assume the values left or right.

    Definition at line 581 of file quadrature_lib.h.

    Member Enumeration Documentation

/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussSimplex.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussSimplex.html 2024-12-27 18:25:10.424884863 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussSimplex.html 2024-12-27 18:25:10.424884863 +0000 @@ -200,8 +200,8 @@

\[
 x = v_0 + B \hat x
\]

where the matrix $B$ is given by $B_{ij} = v[j][i]-v[0][i]$.

The weights are scaled with the absolute value of the determinant of $B$, that is $J \dealcoloneq |\text{det}(B)|$. If $J$ is zero, an empty quadrature is returned. This may happen, in two dimensions, if the three vertices are aligned, or in three dimensions if the four vertices are on the same plane. The present function works also in the codimension one and codimension two case. For instance, when dim=2 and spacedim=3, we can map the quadrature points so that they live on the physical triangle embedded in the three dimensional space. In such a case, the matrix $B$ is not square anymore.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html 2024-12-27 18:25:10.452885056 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html 2024-12-27 18:25:10.456885084 +0000 @@ -315,7 +315,7 @@

    Remove first column and update QR factorization.

Starting from the given QR decomposition $QR= A = [a_1\,\dots a_n], \quad a_i \in {\mathbb R}^m$ we aim at computing the factorization of $\tilde Q \tilde R= \tilde A = [a_2\,\dots a_n], \quad a_i \in {\mathbb R}^m$.

The standard approach is to partition $R$ as

    \[
 R =
 \begin{bmatrix}
@@ -368,7 +368,7 @@
Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

    Implements BaseQR< VectorType >.

    @@ -402,7 +402,7 @@
Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implements BaseQR< VectorType >.

    @@ -436,7 +436,7 @@
Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

    Implements BaseQR< VectorType >.

    @@ -470,7 +470,7 @@
Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implements BaseQR< VectorType >.
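A minimal sketch of the column-update workflow, assuming the member names append_column() and remove_column() shown in this class's interface (the vector values are made up):

```cpp
#include <deal.II/lac/qr.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

int main()
{
  QR<Vector<double>> qr;

  // Two columns a_1, a_2 of A (arbitrary values).
  Vector<double> a1(3), a2(3);
  a1[0] = 1.; a1[1] = 2.; a1[2] = 0.;
  a2[0] = 0.; a2[1] = 1.; a2[2] = 1.;

  qr.append_column(a1); // A = [a_1]
  qr.append_column(a2); // A = [a_1 a_2]

  // Drop the first column; Q and R are updated to factor [a_2].
  qr.remove_column();
}
```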

/usr/share/doc/packages/dealii/doxygen/deal.II/classQSimplex.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQSimplex.html 2024-12-27 18:25:10.480885248 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQSimplex.html 2024-12-27 18:25:10.480885248 +0000 @@ -186,8 +186,8 @@

\[
  x = v_0 + B \hat x
\]

where the matrix $B$ is given by $B_{ij} = v[j][i]-v[0][i]$.

The weights are scaled with the absolute value of the determinant of $B$, that is $J \dealcoloneq |\text{det}(B)|$. If $J$ is zero, an empty quadrature is returned. This may happen, in two dimensions, if the three vertices are aligned, or in three dimensions if the four vertices are on the same plane. The present function also works in the codimension one and codimension two cases. For instance, when dim=2 and spacedim=3, we can map the quadrature points so that they live on the physical triangle embedded in the three-dimensional space. In such a case, the matrix $B$ is no longer square.

    Parameters
/usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html 2024-12-27 18:25:10.512885468 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html 2024-12-27 18:25:10.512885468 +0000 @@ -148,7 +148,7 @@

Since the library assumes $[0,1]$ as the reference interval, we map these values onto the proper reference interval in the implementation.

This variable change can be used to integrate singular integrals. One example is $f(x)/|x-x_0|$ on the reference interval $[0,1]$, where $x_0$, given at construction time, is the location of the singularity, and $f(x)$ is a smooth, non-singular function.

Singular quadrature formulas are rather expensive; nevertheless, Telles' quadrature formulas are much easier to compute than other singular integration techniques such as Lachat-Watson.

We have implemented the case $dim = 1$. For $dim > 1$ the quadrature formula is computed as a tensor product of one-dimensional Telles' quadrature formulas, taking into account the different components of the singularity.

The weights and functions for the Gauss-Legendre formula have been tabulated up to order 12.
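A minimal sketch, assuming the constructor takes the number of base Gauss points and the singularity location (the value of $x_0$ is made up):

```cpp
#include <deal.II/base/point.h>
#include <deal.II/base/quadrature_lib.h>

using namespace dealii;

int main()
{
  // Telles rule on [0,1] built from a 10-point Gauss-Legendre base rule,
  // with the singularity at x_0 = 0.3 (arbitrary choice). Such a rule is
  // suited to integrands like f(x)/|x - x_0| with smooth f.
  const Point<1> x0(0.3);
  QTelles<1>     telles(10, x0);

  // The transformed points and weights are then available as usual.
  const auto &points  = telles.get_points();
  const auto &weights = telles.get_weights();
  (void)points;
  (void)weights;
}
```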

/usr/share/doc/packages/dealii/doxygen/deal.II/classQTrianglePolar.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQTrianglePolar.html 2024-12-27 18:25:10.532885605 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQTrianglePolar.html 2024-12-27 18:25:10.540885660 +0000 @@ -223,8 +223,8 @@

\[
  x = v_0 + B \hat x
\]

where the matrix $B$ is given by $B_{ij} = v[j][i]-v[0][i]$.

The weights are scaled with the absolute value of the determinant of $B$, that is $J \dealcoloneq |\text{det}(B)|$. If $J$ is zero, an empty quadrature is returned. This may happen, in two dimensions, if the three vertices are aligned, or in three dimensions if the four vertices are on the same plane. The present function also works in the codimension one and codimension two cases. For instance, when dim=2 and spacedim=3, we can map the quadrature points so that they live on the physical triangle embedded in the three-dimensional space. In such a case, the matrix $B$ is no longer square.

    Parameters
[in] vertices  The vertices of the simplex you wish to integrate on
    /usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html 2024-12-27 18:25:10.560885798 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html 2024-12-27 18:25:10.564885825 +0000 @@ -124,7 +124,7 @@

    Detailed Description

    template<int dim>
    class QWitherdenVincentSimplex< dim >

    Witherden-Vincent rules for simplex entities.

Like QGauss, users should specify a number n_points_1d as an indication of the polynomial degree to be integrated exactly (e.g., for $n$ points, the rule can integrate polynomials of degree $2 n - 1$ exactly). Additionally, since these rules were derived for simplices, there are also even-ordered rules (i.e., rules that integrate polynomials of degree $2 n$) available which do not have analogous 1d rules.
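A minimal sketch, assuming a second constructor argument selects between the odd (default) and even rules; the point counts are listed in the table below (check quadrature_lib.h for the exact signature):

```cpp
#include <deal.II/base/quadrature_lib.h>

using namespace dealii;

int main()
{
  // Odd rule for n_points_1d = 2: integrates degree-3 polynomials
  // exactly, using 6 points in 2d.
  QWitherdenVincentSimplex<2> odd_rule(2);

  // Even rule for the same n_points_1d: integrates degree-4 polynomials.
  // The boolean flag is an assumed spelling of the odd/even switch.
  QWitherdenVincentSimplex<2> even_rule(2, /*use_odd_order=*/false);
}
```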

    The given value for n_points_1d = 1, 2, 3, 4, 5, 6, 7 (where the last two are only implemented in 2d) results in the following number of quadrature points in 2d and 3d:

    • 2d: odd (default): 1, 6, 7, 15, 19, 28, 37
    • 2d: even: 3, 6, 12, 16, 25, 33, 42
@@ -202,8 +202,8 @@

\[
  x = v_0 + B \hat x
\]

where the matrix $B$ is given by $B_{ij} = v[j][i]-v[0][i]$.

The weights are scaled with the absolute value of the determinant of $B$, that is $J \dealcoloneq |\text{det}(B)|$. If $J$ is zero, an empty quadrature is returned. This may happen, in two dimensions, if the three vertices are aligned, or in three dimensions if the four vertices are on the same plane. The present function also works in the codimension one and codimension two cases. For instance, when dim=2 and spacedim=3, we can map the quadrature points so that they live on the physical triangle embedded in the three-dimensional space. In such a case, the matrix $B$ is no longer square.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classQuadrature.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQuadrature.html 2024-12-27 18:25:10.612886155 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQuadrature.html 2024-12-27 18:25:10.620886210 +0000 @@ -242,9 +242,9 @@

      At least for quadrilaterals and hexahedra (or, more precisely, since we work on reference cells: for the unit square and the unit cube), quadrature formulas are typically tensor products of one-dimensional formulas (see also the section on implementation detail below).

      In order to allow for dimension independent programming, a quadrature formula of dimension zero exists. Since an integral over zero dimensions is the evaluation at a single point, any constructor of such a formula initializes to a single quadrature point with weight one. Access to the weight is possible, while access to the quadrature point is not permitted, since a Point of dimension zero contains no information. The main purpose of these formulae is their use in QProjector, which will create a useful formula of dimension one out of them.

      Mathematical background

For each quadrature formula we denote by m the maximal degree of polynomials integrated exactly on the reference cell the quadrature formula corresponds to; this number is given in the documentation of each concrete formula. The order of the integration error is m+1; that is, by the Bramble-Hilbert lemma the error scales as the cell size to the power m+1. For the optimal formulae QGauss we have $m = 2N-1$, where $N$ is the constructor parameter to QGauss. The tensor product formulae are exact on tensor product polynomials of degree m in each space direction, but they are still only of (m+1)st order.

      Tensor product quadrature

At least for hypercube reference cells (i.e., squares and cubes), most integration formulae in more than one space dimension are tensor products of quadrature formulae in one space dimension, or more generally the tensor product of a formula in (dim-1) dimensions and one in one dimension. There is a special constructor to generate a quadrature formula from two others. For example, the QGauss<dim> formulae include $N^{dim}$ quadrature points in dim dimensions, where $N$ is the constructor parameter of QGauss.
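A minimal sketch of that constructor (the point counts follow from the $N^{dim}$ rule above):

```cpp
#include <deal.II/base/quadrature.h>
#include <deal.II/base/quadrature_lib.h>

using namespace dealii;

int main()
{
  // A 3-point Gauss rule in 1d (m = 5).
  QGauss<1> gauss_1d(3);

  // Tensor product of two 1d rules: 3^2 = 9 points in 2d.
  Quadrature<2> tensor_rule(gauss_1d, gauss_1d);

  // The built-in 2d rule has the same points and weights.
  QGauss<2> gauss_2d(3); // also 9 points
}
```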

      Other uses of this class

      Quadrature objects are used in a number of places within deal.II where integration is performed, most notably via the FEValues and related classes. Some of these classes are also used in contexts where no integrals are involved, but where functions need to be evaluated at specific points, for example to evaluate the solution at individual points or to create graphical output. Examples are the implementation of VectorTools::point_value() and the DataOut and related classes (in particular in connection with the DataPostprocessor class). In such contexts, one often creates specific "Quadrature" objects in which the "quadrature points" are simply the points (in the coordinate system of the reference cell) at which one wants to evaluate the solution. In these kinds of cases, the weights stored by the current class are not used and the name "quadrature object" is interpreted as "list of evaluation points".

      /usr/share/doc/packages/dealii/doxygen/deal.II/classReferenceCell.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classReferenceCell.html 2024-12-27 18:25:10.684886649 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classReferenceCell.html 2024-12-27 18:25:10.688886677 +0000 @@ -499,7 +499,7 @@
Compute the value of the $i$-th linear shape function at location $\xi$ for the current reference-cell type.

    Definition at line 2620 of file reference_cell.h.

    @@ -529,7 +529,7 @@
Compute the gradient of the $i$-th linear shape function at location $\xi$ for the current reference-cell type.

    Definition at line 2709 of file reference_cell.h.

    @@ -1000,7 +1000,7 @@
Return the reference-cell type of face face_no of the current object. For example, if the current object is ReferenceCells::Tetrahedron, then face_no must be in the interval $[0,4)$ and the function will always return ReferenceCells::Triangle. If the current object is ReferenceCells::Hexahedron, then face_no must be in the interval $[0,6)$ and the function will always return ReferenceCells::Quadrilateral. For wedges and pyramids, the returned object may be either ReferenceCells::Triangle or ReferenceCells::Quadrilateral, depending on the given index.

    Definition at line 1878 of file reference_cell.h.

    @@ -1428,7 +1428,7 @@
Return the $d$-dimensional volume of the reference cell that corresponds to the current object, where $d$ is the dimension of the space it lives in. For example, since the quadrilateral reference cell is $[0,1]^2$, its volume is one, whereas the volume of the reference triangle is 0.5 because it occupies the area $\{0 \le x,y \le 1, x+y\le 1\}$.

    For ReferenceCells::Vertex, the reference cell is a zero-dimensional point in a zero-dimensional space. As a consequence, one cannot meaningfully define a volume for it. The function returns one for this case, because this makes it possible to define useful quadrature rules based on the center of a reference cell and its volume.

    Definition at line 2743 of file reference_cell.h.

    @@ -1460,9 +1460,9 @@

    Return the barycenter (i.e., the center of mass) of the reference cell that corresponds to the current object. The function is not called center() because one can define the center of an object in a number of different ways whereas the barycenter of a reference cell $K$ is unambiguously defined as

\[
  \mathbf x_K = \frac{1}{V} \int_K \mathbf x \; dx
\]

    where $V$ is the volume of the reference cell (see also the volume() function).

    @@ -1494,7 +1494,7 @@
Return true if the given point is inside the reference cell of the present space dimension up to some tolerance. This function accepts an additional parameter (which defaults to zero) which specifies by how much the point position may actually be outside the true reference cell. This is useful because in practice we may often not be able to compute the coordinates of a point in reference coordinates exactly, but only up to numerical roundoff. For example, strictly speaking one would expect that for points on the boundary of the reference cell, the function would return true if the tolerance was zero. But in practice, this may or may not actually be true; for example, the point $(1/3, 2/3)$ is on the boundary of the reference triangle because $1/3+2/3 \le 1$, but since neither of its coordinates are exactly representable in floating point arithmetic, the floating point representations of $1/3$ and $2/3$ may or may not add up to anything that is less than or equal to one.

    The tolerance parameter may be less than zero, indicating that the point should be safely inside the cell.

    Definition at line 2807 of file reference_cell.h.
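A minimal sketch exercising the queries documented above (volume, barycenter, point containment):

```cpp
#include <deal.II/base/point.h>
#include <deal.II/grid/reference_cell.h>

using namespace dealii;

int main()
{
  const ReferenceCell tri = ReferenceCells::Triangle;

  const double   v = tri.volume();        // 0.5 for the reference triangle
  const Point<2> c = tri.barycenter<2>(); // (1/3, 1/3)

  // A small positive tolerance absorbs the roundoff discussed above for
  // boundary points such as (1/3, 2/3).
  const bool inside = tri.contains_point(Point<2>(1. / 3, 2. / 3), 1e-12);

  (void)v;
  (void)c;
  (void)inside;
}
```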

    @@ -1547,8 +1547,8 @@
Return the $i$-th unit tangential vector of a face of the reference cell. The vectors are arranged such that the cross product between the two vectors returns the unit normal vector.

Precondition
$i$ must be between zero and dim-1.

    Definition at line 2916 of file reference_cell.h.

@@ -2008,7 +2008,7 @@ const bool legacy_format

Given a set of node indices of the form $(i)$ or $(i,j)$ or $(i,j,k)$ (depending on whether the reference cell is in 1d, 2d, or 3d), return the index the VTK format uses for this node for cells that are subdivided as many times in each of the coordinate directions as described by the second argument. For a uniformly subdivided cell, the second argument is an array whose elements will all be equal.

    The last argument, legacy_format, indicates whether to use the old, VTK legacy format (when true) or the new, VTU format (when false).

    /usr/share/doc/packages/dealii/doxygen/deal.II/classRol_1_1VectorAdaptor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classRol_1_1VectorAdaptor.html 2024-12-27 18:25:10.716886869 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classRol_1_1VectorAdaptor.html 2024-12-27 18:25:10.716886869 +0000 @@ -544,7 +544,7 @@
Create and return a Teuchos smart reference counting pointer to the basis vector corresponding to the $i^{th}$ element of the wrapper vector.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverArnoldi.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverArnoldi.html 2024-12-27 18:25:10.752887116 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverArnoldi.html 2024-12-27 18:25:10.756887143 +0000 @@ -265,7 +265,7 @@
Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

    Note
    Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

    This is declared here to make it possible to take a std::vector of different PETScWrappers vector types
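A minimal sketch of that calling sequence, assuming an already assembled PETSc matrix and an MPI communicator (matrix assembly and vector layout setup are omitted):

```cpp
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/slepc_solver.h>
#include <deal.II/lac/solver_control.h>

#include <vector>

using namespace dealii;

void eigensolve(const PETScWrappers::SparseMatrix &A,
                const MPI_Comm                     mpi_communicator)
{
  std::vector<double> eigenvalues;

  // Each vector acts as a resizing template, so it should already have
  // the right parallel layout (reinit calls omitted here).
  std::vector<PETScWrappers::MPI::Vector> eigenvectors(3);

  SolverControl                control(1000, 1e-9);
  SLEPcWrappers::SolverArnoldi solver(control, mpi_communicator);

  // Composite solve: set up matrices, solve A x = lambda x, and gather
  // the requested three eigenpairs.
  solver.solve(A, eigenvalues, eigenvectors, 3);
}
```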

    @@ -312,9 +312,9 @@
Same as above, but here a composite method for solving the system $Ax=\lambda B x$, for real matrices, vectors, and values $A, B, x, \lambda$.

    Definition at line 737 of file slepc_solver.h.

    @@ -369,9 +369,9 @@
Same as above, but here a composite method for solving the system $Ax=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x, \lambda$.

    Definition at line 775 of file slepc_solver.h.

    @@ -660,8 +660,8 @@
Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots \mathrm{n\_converged}-1$.

    Definition at line 259 of file slepc_solver.cc.

    @@ -704,8 +704,8 @@
Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots \mathrm{n\_converged}-1$.

    Definition at line 272 of file slepc_solver.cc.

    @@ -733,7 +733,7 @@
Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve())

    Definition at line 76 of file slepc_solver.cc.

    @@ -761,8 +761,8 @@
Same as above, but here initialize solver for the linear system $Ax=\lambda B x$.

    Definition at line 86 of file slepc_solver.cc.

/usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverBase.html 2024-12-27 18:25:10.788887363 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverBase.html 2024-12-27 18:25:10.796887418 +0000 @@ -270,7 +270,7 @@ const unsigned int n_eigenpairs = 1

Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

Note
Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

@@ -310,9 +310,9 @@ const unsigned int n_eigenpairs = 1

Same as above, but here a composite method for solving the system $Ax=\lambda B x$, for real matrices, vectors, and values $A, B, x, \lambda$.

Definition at line 737 of file slepc_solver.h.

@@ -360,9 +360,9 @@ const unsigned int n_eigenpairs = 1

Same as above, but here a composite method for solving the system $Ax=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x, \lambda$.

Definition at line 775 of file slepc_solver.h.

@@ -566,8 +566,8 @@

Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots \mathrm{n\_converged}-1$.

Definition at line 259 of file slepc_solver.cc.

@@ -610,8 +610,8 @@

Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots \mathrm{n\_converged}-1$.

Definition at line 272 of file slepc_solver.cc.

@@ -639,7 +639,7 @@

Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve())

Definition at line 76 of file slepc_solver.cc.

@@ -667,8 +667,8 @@

Same as above, but here initialize solver for the linear system $Ax=\lambda B x$.

    Definition at line 86 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverGeneralizedDavidson.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverGeneralizedDavidson.html 2024-12-27 18:25:10.832887665 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverGeneralizedDavidson.html 2024-12-27 18:25:10.836887693 +0000 @@ -265,7 +265,7 @@
Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

Note
Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

@@ -312,9 +312,9 @@

Same as above, but here a composite method for solving the system $Ax=\lambda B x$, for real matrices, vectors, and values $A, B, x, \lambda$.

Definition at line 737 of file slepc_solver.h.

@@ -369,9 +369,9 @@

Same as above, but here a composite method for solving the system $Ax=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x, \lambda$.

Definition at line 775 of file slepc_solver.h.

@@ -660,8 +660,8 @@

Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots \mathrm{n\_converged}-1$.

Definition at line 259 of file slepc_solver.cc.

@@ -704,8 +704,8 @@

Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots \mathrm{n\_converged}-1$.

Definition at line 272 of file slepc_solver.cc.

@@ -733,7 +733,7 @@

Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve())

Definition at line 76 of file slepc_solver.cc.

@@ -761,8 +761,8 @@

Same as above, but here initialize solver for the linear system $Ax=\lambda B x$.

    Definition at line 86 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverJacobiDavidson.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverJacobiDavidson.html 2024-12-27 18:25:10.876887967 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverJacobiDavidson.html 2024-12-27 18:25:10.880887995 +0000 @@ -265,7 +265,7 @@
Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

Note
Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

@@ -312,9 +312,9 @@

Same as above, but here a composite method for solving the system $Ax=\lambda B x$, for real matrices, vectors, and values $A, B, x, \lambda$.

Definition at line 737 of file slepc_solver.h.

@@ -369,9 +369,9 @@

Same as above, but here a composite method for solving the system $Ax=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x, \lambda$.

Definition at line 775 of file slepc_solver.h.

@@ -660,8 +660,8 @@

Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots \mathrm{n\_converged}-1$.

Definition at line 259 of file slepc_solver.cc.

@@ -704,8 +704,8 @@

Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots \mathrm{n\_converged}-1$.

Definition at line 272 of file slepc_solver.cc.

@@ -733,7 +733,7 @@

Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve())

Definition at line 76 of file slepc_solver.cc.

@@ -761,8 +761,8 @@

Same as above, but here initialize solver for the linear system $Ax=\lambda B x$.

    Definition at line 86 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverKrylovSchur.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverKrylovSchur.html 2024-12-27 18:25:10.916888242 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverKrylovSchur.html 2024-12-27 18:25:10.920888270 +0000 @@ -265,7 +265,7 @@
Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

Note
Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

@@ -312,9 +312,9 @@

Same as above, but here a composite method for solving the system $Ax=\lambda B x$, for real matrices, vectors, and values $A, B, x, \lambda$.

Definition at line 737 of file slepc_solver.h.

@@ -369,9 +369,9 @@

Same as above, but here a composite method for solving the system $Ax=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x, \lambda$.

Definition at line 775 of file slepc_solver.h.

@@ -660,8 +660,8 @@

Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots \mathrm{n\_converged}-1$.

Definition at line 259 of file slepc_solver.cc.

@@ -704,8 +704,8 @@

Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots \mathrm{n\_converged}-1$.

Definition at line 272 of file slepc_solver.cc.

@@ -733,7 +733,7 @@

Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve())

Definition at line 76 of file slepc_solver.cc.

@@ -761,8 +761,8 @@

Same as above, but here initialize solver for the linear system $Ax=\lambda B x$.

    Definition at line 86 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLAPACK.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLAPACK.html 2024-12-27 18:25:10.960888544 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLAPACK.html 2024-12-27 18:25:10.964888572 +0000 @@ -265,7 +265,7 @@
Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

Note
Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

@@ -312,9 +312,9 @@

Same as above, but here a composite method for solving the system $Ax=\lambda B x$, for real matrices, vectors, and values $A, B, x, \lambda$.

Definition at line 737 of file slepc_solver.h.

@@ -369,9 +369,9 @@

Same as above, but here a composite method for solving the system $Ax=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x, \lambda$.

Definition at line 775 of file slepc_solver.h.

@@ -660,8 +660,8 @@

Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots \mathrm{n\_converged}-1$.

Definition at line 259 of file slepc_solver.cc.

@@ -704,8 +704,8 @@

Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots \mathrm{n\_converged}-1$.

Definition at line 272 of file slepc_solver.cc.

@@ -733,7 +733,7 @@

Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve())

Definition at line 76 of file slepc_solver.cc.

@@ -761,8 +761,8 @@

Same as above, but here initialize solver for the linear system $Ax=\lambda B x$.

    Definition at line 86 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLanczos.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLanczos.html 2024-12-27 18:25:11.000888819 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverLanczos.html 2024-12-27 18:25:11.004888846 +0000 @@ -265,7 +265,7 @@
Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

Note
Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

@@ -312,9 +312,9 @@

Same as above, but here a composite method for solving the system $Ax=\lambda B x$, for real matrices, vectors, and values $A, B, x, \lambda$.

Definition at line 737 of file slepc_solver.h.

@@ -369,9 +369,9 @@

Same as above, but here a composite method for solving the system $Ax=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x, \lambda$.

Definition at line 775 of file slepc_solver.h.

@@ -660,8 +660,8 @@

Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots \mathrm{n\_converged}-1$.

Definition at line 259 of file slepc_solver.cc.

@@ -704,8 +704,8 @@

Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots \mathrm{n\_converged}-1$.

Definition at line 272 of file slepc_solver.cc.

@@ -733,7 +733,7 @@

Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve())

Definition at line 76 of file slepc_solver.cc.

@@ -761,8 +761,8 @@

Same as above, but here initialize solver for the linear system $Ax=\lambda B x$.

    Definition at line 86 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverPower.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverPower.html 2024-12-27 18:25:11.048889148 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSLEPcWrappers_1_1SolverPower.html 2024-12-27 18:25:11.052889176 +0000 @@ -265,7 +265,7 @@
Composite method that solves the eigensystem $Ax=\lambda x$. The eigenvector sent in has to have at least one element that we can use as a template when resizing, since we do not know the parameters of the specific vector class used (i.e. local_dofs for MPI vectors). However, while copying eigenvectors, at least twice the memory size of eigenvectors is being used (and can be more). To avoid doing this, the fairly standard calling sequence executed here is used: Set up matrices for solving; Actually solve the system; Gather the solution(s).

Note
Note that the number of converged eigenvectors can be larger than the number of eigenvectors requested; this is due to a round off error (success) of the eigenproblem solver context. If this is found to be the case we simply do not bother with more eigenpairs than requested, but handle that it may be more than specified by ignoring any extras. By default one eigenvector/eigenvalue pair is computed.

This is declared here to make it possible to take a std::vector of different PETScWrappers vector types

@@ -312,9 +312,9 @@

Same as above, but here a composite method for solving the system $Ax=\lambda B x$, for real matrices, vectors, and values $A, B, x, \lambda$.

Definition at line 737 of file slepc_solver.h.

@@ -369,9 +369,9 @@

Same as above, but here a composite method for solving the system $Ax=\lambda B x$ with real matrices $A, B$ and imaginary eigenpairs $x, \lambda$.

Definition at line 775 of file slepc_solver.h.

@@ -660,8 +660,8 @@

Access the real parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots \mathrm{n\_converged}-1$.

Definition at line 259 of file slepc_solver.cc.

@@ -704,8 +704,8 @@

Access the real and imaginary parts of solutions for a solved eigenvector problem, pair index solutions, $\text{index}\,\in\,0\dots \mathrm{n\_converged}-1$.

Definition at line 272 of file slepc_solver.cc.

@@ -733,7 +733,7 @@

Initialize solver for the linear system $Ax=\lambda x$. (Note: this is required before calling solve())

Definition at line 76 of file slepc_solver.cc.

@@ -761,8 +761,8 @@

Same as above, but here initialize solver for the linear system $Ax=\lambda B x$.

    Definition at line 86 of file slepc_solver.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html 2024-12-27 18:25:11.100889506 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html 2024-12-27 18:25:11.100889506 +0000 @@ -211,85 +211,85 @@

The class ARKode is a wrapper to the SUNDIALS variable-step, embedded, additive Runge-Kutta solver, a general-purpose solver for systems of ordinary differential equations characterized by the presence of both fast and slow dynamics.

    Fast dynamics are treated implicitly, and slow dynamics are treated explicitly, using nested families of implicit and explicit Runge-Kutta solvers.

    Citing directly from ARKode documentation:

ARKode solves ODE initial value problems (IVPs) in $R^N$. These problems should be posed in explicit form as

\[
  M\dot y = f_E(t, y) + f_I (t, y), \qquad y(t_0) = y_0.
\]

Here, $t$ is the independent variable (e.g. time), and the dependent variables are given by $y \in R^N$, and we use notation $\dot y$ to denote $dy/dt$. $M$ is a user-supplied nonsingular operator from $R^N \to R^N$. This operator may depend on $t$ but not on $y$.

For standard systems of ordinary differential equations and for problems arising from the spatial semi-discretization of partial differential equations using finite difference or finite volume methods, $M$ is typically the identity matrix, $I$. For PDEs using a finite-element spatial semi-discretization $M$ is typically a well-conditioned mass matrix.

    The two right-hand side functions may be described as:

• $f_E(t, y)$: contains the "slow" time scale components of the system. This will be integrated using explicit methods.
• $f_I(t, y)$: contains the "fast" time scale components of the system. This will be integrated using implicit methods.

    ARKode may be used to solve stiff, nonstiff and multi-rate problems. Roughly speaking, stiffness is characterized by the presence of at least one rapidly damped mode, whose time constant is small compared to the time scale of the solution itself. In the implicit/explicit (ImEx) splitting above, these stiff components should be included in the right-hand side function $f_I (t, y)$.

    -

    For multi-rate problems, a user should provide both of the functions $f_E$ and $f_I$ that define the IVP system.

    -

    For nonstiff problems, only $f_E$ should be provided, and $f_I$ is assumed to be zero, i.e. the system reduces to the non-split IVP:

    +

    ARKode may be used to solve stiff, nonstiff and multi-rate problems. Roughly speaking, stiffness is characterized by the presence of at least one rapidly damped mode, whose time constant is small compared to the time scale of the solution itself. In the implicit/explicit (ImEx) splitting above, these stiff components should be included in the right-hand side function $f_I (t, y)$.

    +

    For multi-rate problems, a user should provide both of the functions $f_E$ and $f_I$ that define the IVP system.

    +

    For nonstiff problems, only $f_E$ should be provided, and $f_I$ is assumed to be zero, i.e. the system reduces to the non-split IVP:

\[
  M\dot y = f_E(t, y), \qquad y(t_0) = y_0.
\]

In this scenario, the ARK methods reduce to classical explicit Runge-Kutta methods (ERK). For these classes of methods, ARKode allows orders of accuracy $q = \{2, 3, 4, 5, 6, 8\}$, with embeddings of orders $p = \{1, 2, 3, 4, 5, 7\}$. These default to the Heun-Euler-2-1-2, Bogacki-Shampine-4-2-3, Zonneveld-5-3-4, Cash-Karp-6-4-5, Verner-8-5-6 and Fehlberg-13-7-8 methods, respectively.

Finally, for stiff (linear or nonlinear) problems the user may provide only $f_I$, implying that $f_E = 0$, so that the system reduces to the non-split IVP

\[
  M\dot y = f_I(t, y), \qquad y(t_0) = y_0.
\]

Similarly to ERK methods, in this scenario the ARK methods reduce to classical diagonally-implicit Runge-Kutta methods (DIRK). For these classes of methods, ARKode allows orders of accuracy $q = \{2, 3, 4, 5\}$, with embeddings of orders $p = \{1, 2, 3, 4\}$. These default to the SDIRK-2-1-2, ARK-4-2-3 (implicit), SDIRK-5-3-4 and ARK-8-4-5 (implicit) methods, respectively.

    For both DIRK and ARK methods, an implicit system of the form

\[
  G(z_i) \dealcoloneq M z_i - h_n A^I_{i,i} f_I (t^I_{n,i}, z_i) - a_i = 0
\]

must be solved for each stage $z_i, i = 1, \ldots, s$, where we have the data

\[
  a_i \dealcoloneq
  M y_{n-1} + h_n \sum_{j=1}^{i-1} [ A^E_{i,j} f_E(t^E_{n,j}, z_j)
  + A^I_{i,j} f_I (t^I_{n,j}, z_j)]
\]

    for the ARK methods, or

\[
  a_i \dealcoloneq
  M y_{n-1} + h_n \sum_{j=1}^{i-1} A^I_{i,j} f_I (t^I_{n,j}, z_j)
\]

for the DIRK methods. Here $A^I_{i,j}$ and $A^E_{i,j}$ are the Butcher tableaux for the chosen solver.

If $f_I(t,y)$ depends nonlinearly on $y$ then the systems above correspond to a nonlinear system of equations; if $f_I (t, y)$ depends linearly on $y$ then this is a linear system of equations. By specifying the flag implicit_function_is_linear, ARKode takes some shortcuts that allow a faster solution process.

    For systems of either type, ARKode allows a choice of solution strategy. The default solver choice is a variant of Newton's method,

\[
  z_i^{m+1} = z_i^m + \delta^{m+1},
\]

where $m$ is the Newton step index, and the Newton update $\delta^{m+1}$ requires the solution of the linear Newton system

\[
  N(z_i^m) \delta^{m+1} = -G(z_i^m),
\]

    where

\[
  N \dealcoloneq M - \gamma J, \quad J
  \dealcoloneq \frac{\partial f_I}{\partial y},
  \qquad \gamma \dealcoloneq h_n A^I_{i,i}.
\]

As an alternative to Newton's method, ARKode may solve for each stage $z_i, i = 1, \ldots, s$ using an Anderson-accelerated fixed point iteration

\[
  z_i^{m+1} = g(z_i^{m}), \qquad m=0,1,\ldots.
\]

Unlike with Newton's method, this option does not require the solution of a linear system at each iteration; instead, it solves a low-dimensional least-squares problem to construct the nonlinear update.

Finally, if the user specifies implicit_function_is_linear, i.e., $f_I(t, y)$ depends linearly on $y$, and if the Newton-based nonlinear solver is chosen, then the system will be solved using only a single Newton iteration. Notice that in order for the Newton solver to be used, jacobian_times_vector() should be supplied. If it is not supplied then only the fixed-point iteration will be supported, and the implicit_function_is_linear setting is ignored.

The optimal solver (Newton vs fixed-point) is highly problem-dependent. Since fixed-point solvers do not require the solution of any linear systems, each iteration may be significantly less costly than its Newton counterpart. However, this can come at the cost of slower convergence (or even divergence) in comparison with Newton-like methods. These fixed-point solvers do allow for user specification of the Anderson-accelerated subspace size, $m_k$. While the required amount of solver memory grows proportionately to $m_k N$, larger values of $m_k$ may result in faster convergence.

This improvement may be significant even for "small" values, e.g. $1 \leq m_k \leq 5$, and convergence may not improve (or may even deteriorate) for larger values of $m_k$. While ARKode uses a Newton-based iteration as its default solver due to its increased robustness on very stiff problems, it is highly recommended that users also consider the fixed-point solver when attempting a new problem.

For either the Newton or fixed-point solvers, it is well known that both the efficiency and robustness of the algorithm depend intimately on the choice of a good initial guess. In ARKode, the initial guess for either nonlinear solution method is a predicted value $z_i(0)$ that is computed explicitly from previously computed data (e.g. $y_{n-2}, y_{n-1}$, and $z_j$ where $j < i$). Additional information on the specific predictor algorithms implemented in ARKode is provided in the ARKode documentation.

    The user has to provide the implementation of at least one (or both) of the following std::functions:

    To provide a simple example, consider the harmonic oscillator problem:

\[
  \begin{split}
    u'' & = -k^2 u \\
    u (0) & = 0 \\
    u'(0) & = k
  \end{split}
\]

We write it in terms of a first-order ODE:

\[
  \begin{matrix}
    y_0' & = y_1 \\
    y_1' & = -k^2 y_0
  \end{matrix}
\]

That is $y' = A y$ where

\[
  A \dealcoloneq
  \begin{pmatrix}
  0 & 1 \\
  -k^2 & 0
  \end{pmatrix}
\]

and $y(0)=(0, k)^T$.

The exact solution is $y_0(t) = \sin(k t)$, $y_1(t) = y_0'(t) = k \cos(k t)$, $y_1'(t) = -k^2 \sin(k t)$.

    A minimal implementation, using only explicit RK methods, is given by the following code snippet:

    using VectorType = Vector<double>;
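// A sketch of how the truncated snippet might continue, assuming the
// deal.II SUNDIALS::ARKode interface documented below (kappa plays the
// role of k in the formulas above):
const double kappa = 1.0;

SUNDIALS::ARKode<VectorType> ode;
ode.explicit_function =
  [kappa](const double /*t*/, const VectorType &y, VectorType &ydot) {
    ydot[0] = y[1];                  // y_0' = y_1
    ydot[1] = -kappa * kappa * y[0]; // y_1' = -k^2 y_0
  };

VectorType y(2);
y[0] = 0.0;   // u(0)  = 0
y[1] = kappa; // u'(0) = k
ode.solve_ode(y);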
    @@ -733,8 +733,8 @@
A function object that users may supply and that is intended to compute the explicit part of the IVP right hand side. Sets explicit_f $= f_E(t, y)$.

    At least one of explicit_function() or implicit_function() must be provided. According to which one is provided, explicit, implicit, or mixed RK methods are used.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, ARKode can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.
    @@ -755,8 +755,8 @@
A function object that users may supply and that is intended to compute the implicit part of the IVP right hand side. Sets implicit_f $= f_I(t, y)$.

    At least one of explicit_function() or implicit_function() must be provided. According to which one is provided, explicit, implicit, or mixed RK methods are used.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, ARKode can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.
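To make the division of labor concrete, here is a hedged ImEx wiring sketch, reusing the ode object and VectorType from the snippet above; the right-hand-side split is an invented example with a stiff part $f_I = \lambda y$ and a slow forcing $f_E = \sin t$:

const double lambda = -1000.0; // assumed stiff decay rate
ode.implicit_function =
  [lambda](const double /*t*/, const VectorType &y, VectorType &implicit_f) {
    implicit_f[0] = lambda * y[0]; // fast/stiff part f_I(t, y)
  };
ode.explicit_function =
  [](const double t, const VectorType & /*y*/, VectorType &explicit_f) {
    explicit_f[0] = std::sin(t);   // slow part f_E(t, y)
  };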
    @@ -778,7 +778,7 @@ /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html 2024-12-27 18:25:11.148889835 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html 2024-12-27 18:25:11.152889863 +0000 @@ -203,72 +203,72 @@

    Citing from the SUNDIALS documentation:

    Consider a system of Differential-Algebraic Equations written in the general form

\[
  \begin{cases}
      F(t,y,\dot y) = 0\, , \\
      y(t_0) = y_0\, , \\
      \dot y (t_0) = \dot y_0\, .
  \end{cases}
\]

where $y,\dot y$ are vectors in $\mathbb{R}^n$, $t$ is often the time (but can also be a parametric quantity), and $F:\mathbb{R}\times\mathbb{R}^n\times \mathbb{R}^n\rightarrow\mathbb{R}^n$. Such a problem is solved using Newton iteration augmented with a line search global strategy. The integration method used in IDA is the variable-order, variable-coefficient BDF (Backward Differentiation Formula) in fixed-leading-coefficient form. The method order ranges from 1 to 5, with the BDF of order $q$ given by the multistep formula

\[
  \sum_{i=0}^q \alpha_{n,i}\,y_{n-i}=h_n\,\dot y_n\, ,
\]

where $y_n$ and $\dot y_n$ are the computed approximations of $y(t_n)$ and $\dot y(t_n)$, respectively, and the step size is $h_n=t_n-t_{n-1}$. The coefficients $\alpha_{n,i}$ are uniquely determined by the order $q$ and the history of the step sizes. The application of the BDF method to the DAE system results in a nonlinear algebraic system to be solved at each time step:

\[
  G(y_n)\equiv F\left(t_n,y_n,\dfrac{1}{h_n}\sum_{i=0}^q
  \alpha_{n,i}\,y_{n-i}\right)=0\, .
\]

    The Newton method leads to a linear system of the form

\[
  J[y_{n(m+1)}-y_{n(m)}]=-G(y_{n(m)})\, ,
\]

where $y_{n(m)}$ is the $m$-th approximation to $y_n$, and $J$ is the approximation of the system Jacobian

\[
  J=\dfrac{\partial G}{\partial y}
  = \dfrac{\partial F}{\partial y} +
    \alpha \dfrac{\partial F}{\partial \dot y}\, ,
\]

and $\alpha = \alpha_{n,0}/h_n$. It is worth mentioning that the scalar $\alpha$ changes whenever the step size or method order changes.

    A simple example: an ordinary differential equation

    To provide a simple example, consider the following harmonic oscillator problem:

\[
  \begin{split}
    u'' & = -k^2 u \\
    u (0) & = 0 \\
    u'(0) & = k
  \end{split}
\]

We write it in terms of a first-order ODE:

\[
  \begin{matrix}
    y_0' & -y_1      & = 0 \\
    y_1' & + k^2 y_0 & = 0
  \end{matrix}
\]

That is, $F(y', y, t) = y' + A y = 0$ where

\[
  A =
  \begin{pmatrix}
  0 & -1 \\
  k^2 & 0
  \end{pmatrix}
\]

and $y(0)=(0, k)$, $y'(0) = (k, 0)$.

The exact solution is $y_0(t) = \sin(k t)$, $y_1(t) = y_0'(t) = k \cos(k t)$, $y_1'(t) = -k^2 \sin(k t)$.

The Jacobian to assemble is the following: $J = \alpha I + A$.

    This is achieved by the following snippet of code:

    using VectorType = Vector<double>;
    VectorType y(2);
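// A sketch of how the truncated snippet might continue, assuming the
// deal.II SUNDIALS::IDA interface (kappa plays the role of k above):
VectorType y_dot(2);
const double kappa = 1.0;
y[1]     = kappa; // y_0(0)  = 0, y_1(0)  = k
y_dot[0] = kappa; // y_0'(0) = k, y_1'(0) = 0

FullMatrix<double> J(2, 2);
FullMatrix<double> J_inverse(2, 2);

SUNDIALS::IDA<VectorType> time_stepper;
time_stepper.reinit_vector = [](VectorType &v) { v.reinit(2); };

// F(y', y, t) = y' + A y
time_stepper.residual = [kappa](const double /*t*/, const VectorType &y,
                                const VectorType &y_dot, VectorType &res) {
  res[0] = y_dot[0] - y[1];
  res[1] = y_dot[1] + kappa * kappa * y[0];
};

// J = alpha I + A, inverted once per setup call
time_stepper.setup_jacobian = [&](const double /*t*/, const VectorType & /*y*/,
                                  const VectorType & /*y_dot*/,
                                  const double alpha) {
  J(0, 0) = alpha;         J(0, 1) = -1.0;
  J(1, 0) = kappa * kappa; J(1, 1) = alpha;
  J_inverse.invert(J);
};

time_stepper.solve_with_jacobian =
  [&](const VectorType &rhs, VectorType &dst, const double /*tolerance*/) {
    J_inverse.vmult(dst, rhs);
  };

time_stepper.solve_dae(y, y_dot);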
    @@ -330,68 +330,68 @@

    A differential algebraic equation (DAE) example

A more interesting example is a situation where the form $F(y', y, t) = 0$ provides something genuinely more flexible than a typical ordinary differential equation. Specifically, consider the equation

\begin{align*}
  u'(t) &= av(t),
  \\
  0 &= v(t) - u(t).
\end{align*}

One can combine the two variables into $y(t) = [u(t), v(t)]^T$. Here, one of the two variables does not have a time derivative. In applications, this is often the case when one variable evolves in time (here, $u(t)$) on its own time scale, and the other one finds its value as a function of the former on a much faster time scale. In the current context, we could of course easily eliminate $v(t)$ using the second equation, and would then just be left with the equation

\[
  u'(t) = au(t)
\]

which has solution $u(t) = u(0)e^{at}$. But this is, in general, not easily possible if the two variables are related by differential operators. In fact, this happens quite frequently in applications. Take, for example, the time-dependent Stokes equations:

\begin{align*}
  \frac{\partial \mathbf u(\mathbf x,t)}{\partial t}
  - \nu \Delta \mathbf u(\mathbf x,t) + \nabla p(\mathbf x,t)
  &= \mathbf f(\mathbf x,t),
  \\
  \nabla \cdot \mathbf u(\mathbf x,t) &= 0.
\end{align*}

Here, the fluid velocity $\mathbf u(\mathbf x,t)$ evolves over time, and the pressure is always in equilibrium with the flow because the Stokes equations are derived under the assumption that the speed of sound (at which pressure perturbations propagate) is much larger than the fluid velocity. As a consequence, there is no time derivative on the pressure available in the equation, but unlike the simple model problem above, the pressure cannot easily be eliminated from the system. Similar situations happen in step-21, step-31, step-32, step-43, and others, where a subset of variables is always in instantaneous equilibrium with another set of variables that evolves on a slower time scale.

    Another case where we could eliminate a variable but do not want to is where that additional variable is introduced in the first place to work around some other problem. As an example, consider the time dependent version of the biharmonic problem we consider in step-47 (as well as some later ones). The equations we would then be interested in would read

\begin{align*}
  \frac{\partial u(\mathbf x,t)}{\partial t} + \Delta^2 u(\mathbf x,t) &=
  f(\mathbf x,t).
\end{align*}

As discussed in step-47, the difficulty is the presence of the fourth derivatives. One way in which one can address this is by introducing an auxiliary variable $v=\Delta u$ which would render the problem into the following one that only ever has second derivatives which we know how to deal with:

\begin{align*}
  \frac{\partial u(\mathbf x,t)}{\partial t} + \Delta v(\mathbf x,t) &=
  f(\mathbf x,t),
  \\
  v(\mathbf x,t)-\Delta u(\mathbf x,t) &= 0.
\end{align*}

Here, the introduction of the additional variable was voluntary, and could be undone, but we don't want that of course. Rather, we end up with a differential-algebraic equation because the equations do not have a time derivative for $v$.

Rather than show how to solve the trivial (linear) case above, let us instead consider the situation where we introduce another variable $v$ that is related to $u$ by the nonlinear relationship $v=u^p$, $p\ge 1$:

\begin{align*}
  u'(t) &= a v(t)^{1/p},
  \\
  0 &= v(t) - u(t)^p.
\end{align*}

    We will impose initial conditions as

\begin{align*}
  u(0) &= 1 \\
  v(0) &= 1.
\end{align*}

The problem continues to have the solution $u(t)=e^{at}$ with the auxiliary variable satisfying $v(t)=[e^{at}]^p$. One would implement all of this using the following little program where you have to recall that

\[
  F = \begin{pmatrix}u' - a v^{1/p} \\ -u^p + v \end{pmatrix}
\]

    and that the Jacobian we need to provide is

\[
  J(\alpha) =
  \dfrac{\partial F}{\partial y} +
    \alpha \dfrac{\partial F}{\partial \dot y}
  = \begin{pmatrix} \alpha & -av^{1/p-1}/p \\ -pu^{p-1} & 1 \end{pmatrix}
\]

    All of this can be implemented using the following code:

    const double a = 1.0;
    const double p = 1.5;
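// A sketch of the elided middle of the snippet, assuming the same
// SUNDIALS::IDA interface as in the ODE example above:
using VectorType = Vector<double>;
VectorType y(2), y_dot(2);
y[0] = y[1] = 1.0; // u(0) = v(0) = 1
y_dot[0] = a;      // u'(0) = a   (u = e^{at})
y_dot[1] = p * a;  // v'(0) = p a (v = e^{pat})

FullMatrix<double> J(2, 2);
FullMatrix<double> J_inverse(2, 2);

SUNDIALS::IDA<VectorType> time_stepper;
time_stepper.reinit_vector = [](VectorType &v) { v.reinit(2); };

// F = (u' - a v^{1/p}, -u^p + v)
time_stepper.residual = [&](const double /*t*/, const VectorType &y,
                            const VectorType &y_dot, VectorType &res) {
  res[0] = y_dot[0] - a * std::pow(y[1], 1. / p);
  res[1] = -std::pow(y[0], p) + y[1];
};

// J(alpha) as given in the formula above, inverted once per setup call
time_stepper.setup_jacobian = [&](const double /*t*/, const VectorType &y,
                                  const VectorType & /*y_dot*/,
                                  const double alpha) {
  J(0, 0) = alpha;
  J(0, 1) = -a * std::pow(y[1], 1. / p - 1.) / p;
  J(1, 0) = -p * std::pow(y[0], p - 1.);
  J(1, 1) = 1.0;
  J_inverse.invert(J);
};

time_stepper.solve_with_jacobian =
  [&](const VectorType &rhs, VectorType &dst, const double /*tolerance*/) {
    J_inverse.vmult(dst, rhs);
  };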
    @@ -447,30 +447,30 @@
    time_stepper.solve_dae(y, y_dot);
Note that in this code, we not only provide initial conditions for $u$ and $v$, but also for $u'$ and $v'$. We can do this here because we know what the exact solution is.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html 2024-12-27 18:25:11.184890082 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html 2024-12-27 18:25:11.188890110 +0000 @@ -179,14 +179,14 @@
IDA is a Differential Algebraic solver. As such, it requires initial conditions also for the first order derivatives. If you do not provide consistent initial conditions (i.e., conditions for which $F(\dot y(0), y(0), 0) = 0$), you can ask SUNDIALS to compute initial conditions for you by specifying InitialConditionCorrection for the initial conditions both at the initial_time (ic_type) and after a reset has occurred (reset_type).

Enumerator:

• none: Do not try to make initial conditions consistent.
• use_y_diff: Compute the algebraic components of $y$ and differential components of $\dot y$, given the differential components of $y$. This option requires that the user specifies differential and algebraic components in the function IDA::differential_components().
• use_y_dot: Compute all components of $y$, given $\dot y$.

    @@ -565,8 +565,8 @@

    Type of correction for initial conditions.

If you do not provide consistent initial conditions (i.e., conditions for which $F(\dot y(0), y(0), 0) = 0$), you can ask SUNDIALS to compute initial conditions for you by using the ic_type parameter at construction time.

Notice that you could in principle use this capability to solve for steady state problems by setting y_dot to zero and asking to compute the $y(0)$ that satisfies $F(0, y(0), 0) = 0$; however, the nonlinear solver used inside IDA may not be robust enough for complex problems with several million unknowns.

    Definition at line 775 of file ida.h.
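As a hedged configuration sketch (names follow the AdditionalData documentation above; a two-component DAE in which only the first component is differential is an assumed example):

using VectorType = Vector<double>;

SUNDIALS::IDA<VectorType>::AdditionalData data;
data.ic_type    = SUNDIALS::IDA<VectorType>::AdditionalData::use_y_diff;
data.reset_type = SUNDIALS::IDA<VectorType>::AdditionalData::use_y_diff;

SUNDIALS::IDA<VectorType> time_stepper(data);

// use_y_diff requires marking which components are differential:
time_stepper.differential_components = []() {
  IndexSet diff(2);
  diff.add_index(0); // only the first component carries a time derivative
  return diff;
};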

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 2024-12-27 18:25:11.220890330 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 2024-12-27 18:25:11.228890384 +0000 @@ -188,48 +188,48 @@

    Detailed Description

    template<typename VectorType = Vector<double>>
    class SUNDIALS::KINSOL< VectorType >

    Interface to SUNDIALS' nonlinear solver (KINSOL).

KINSOL is a solver for nonlinear algebraic systems in residual form $F(u) = 0$ or fixed point form $G(u) = u$, where $u$ is a vector which we will assume to be in ${\mathbb R}^n$ or ${\mathbb C}^n$, but that may also have a block structure and may be distributed in parallel computations; the functions $F$ and $G$ satisfy $F,G:{\mathbb R}^N \to{\mathbb R}^N$ or $F,G:{\mathbb C}^N \to{\mathbb C}^N$. It includes a Newton-Krylov solver as well as Picard and fixed point solvers, both of which can be accelerated with Anderson acceleration. KINSOL is based on the previous Fortran package NKSOL of Brown and Saad. An example of using KINSOL can be found in the step-77 tutorial program.

    KINSOL's Newton solver employs the inexact Newton method. As this solver is intended mainly for large systems, the user is required to provide their own solver function.

    At the highest level, KINSOL implements the following iteration scheme:

• set $u_0$ = an initial guess
• For $n = 0, 1, 2, \ldots$ until convergence do:
  • Solve $J(u_n)\delta_n = -F(u_n)$
  • Set $u_{n+1} = u_n + \lambda \delta_n, 0 < \lambda \leq 1$
  • Test for convergence

Here, $u_n$ is the $n$-th iterate to $u$, and $J(u) = \nabla_u F(u)$ is the system Jacobian. At each stage in the iteration process, a scalar multiple of the step $\delta_n$ is added to $u_n$ to produce a new iterate, $u_{n+1}$. A test for convergence is made before the iteration continues.

      Unless specified otherwise by the user, KINSOL strives to update Jacobian information as infrequently as possible to balance the high costs of matrix operations against other costs. Specifically, these updates occur when:

• the problem is initialized,
• $\|\lambda \delta_{n-1} \|_{D_u,\infty} \geq 1.5$ (inexact Newton only; see below for a definition of $\| \cdot \|_{D_u,\infty}$),
• a specified number of nonlinear iterations have passed since the last update,
• the linear solver failed recoverably with outdated Jacobian information,
• the global strategy failed with outdated Jacobian information, or
• $\|\lambda \delta_{n} \|_{D_u,\infty} \leq$ tolerance with outdated Jacobian information.

      KINSOL allows changes to the above strategy through optional solver inputs. The user can disable the initial Jacobian information evaluation or change the default value of the number of nonlinear iterations after which a Jacobian information update is enforced.

To address the case of ill-conditioned nonlinear systems, KINSOL allows prescribing scaling factors both for the solution vector and for the residual vector. For scaling to be used, the user may supply the function get_solution_scaling(), which returns values $D_u$, the diagonal elements of the scaling matrix such that $D_u u_n$ has all components roughly the same magnitude when $u_n$ is close to a solution, and get_function_scaling(), which supplies values $D_F$, the diagonal scaling matrix elements such that $D_F F$ has all components roughly the same magnitude when $u_n$ is not too close to a solution.

      When scaling values are provided for the solution vector, these values are automatically incorporated into the calculation of the perturbations used for the default difference quotient approximations for Jacobian information if the user does not supply a Jacobian solver through the solve_with_jacobian() function.

Two methods of applying a computed step $\delta_n$ to the previously computed solution vector are implemented. The first and simplest is the standard Newton strategy which applies the update with a constant $\lambda$ always set to 1. The other method is a global strategy, which attempts to use the direction implied by $\delta_n$ in the most efficient way for furthering convergence of the nonlinear problem. This technique is implemented in the second strategy, called Linesearch. This option employs both the $\alpha$ and $\beta$ conditions of the Goldstein-Armijo linesearch algorithm given in [DennisSchnabel96], where $\lambda$ is chosen to guarantee a sufficient decrease in $F$ relative to the step length as well as a minimum step length relative to the initial rate of decrease of $F$. One property of the algorithm is that the full Newton step tends to be taken close to the solution.

      The basic fixed-point iteration scheme implemented in KINSOL is given by:

• Set $u_0 =$ an initial guess
• For $n = 0, 1, 2, \dots$ until convergence do:
  • Set $u_{n+1} = G(u_n)$
  • Test for convergence
At each stage in the iteration process, function $G$ is applied to the current iterate to produce a new iterate, $u_{n+1}$. A test for convergence is made before the iteration continues.

For Picard iteration, as implemented in KINSOL, we consider a special form of the nonlinear function $F$, such that $F(u) = Lu - N(u)$, where $L$ is a constant nonsingular matrix and $N$ is (in general) nonlinear.

Then the fixed-point function $G$ is defined as $G(u) = u - L^{-1}F(u)$. Within each iteration, the Picard step is computed then added to $u_n$ to produce the new iterate. Next, the nonlinear residual function is evaluated at the new iterate, and convergence is checked. The Picard and fixed point methods can be significantly accelerated using Anderson's acceleration method.

        The user has to provide the implementation of the following std::functions:

        • reinit_vector; and only one of
        • residual; or
        • iteration_function;
Specifying residual() allows the user to use Newton and Picard strategies (i.e., $F(u)=0$ will be solved), while specifying iteration_function() means a fixed point iteration will be used (i.e., $G(u)=u$ will be solved). An error will be thrown if iteration_function() is set for Picard or Newton.

        If the use of a Newton or Picard method is desired, then the user should also supply

        • solve_with_jacobian; and optionally
        • setup_jacobian;
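For illustration, a minimal sketch (an assumed scalar example, not from the manual) that wires up these callbacks to solve $F(u) = u^3 - 5 = 0$ with an exact $1\times 1$ Jacobian could look like:

using VectorType = Vector<double>;

SUNDIALS::KINSOL<VectorType>::AdditionalData data;
data.strategy = SUNDIALS::KINSOL<VectorType>::AdditionalData::linesearch;

SUNDIALS::KINSOL<VectorType> solver(data);
solver.reinit_vector = [](VectorType &x) { x.reinit(1); };
solver.residual = [](const VectorType &u, VectorType &F) {
  F[0] = u[0] * u[0] * u[0] - 5.0;
};

double J = 1.0; // refreshed by setup_jacobian()
solver.setup_jacobian = [&J](const VectorType &current_u,
                             const VectorType & /*current_f*/) {
  J = 3.0 * current_u[0] * current_u[0];
};
solver.solve_with_jacobian =
  [&J](const VectorType &rhs, VectorType &dst, const double /*tolerance*/) {
    dst[0] = rhs[0] / J; // solve the 1x1 Newton system
  };

VectorType u(1);
u[0] = 1.0;      // initial guess
solver.solve(u); // converges towards u[0] = cbrt(5)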
@@ -430,7 +430,7 @@

A function object that users should supply and that is intended to compute the iteration function $G(u)$ for the fixed point iteration. This function is only used if the SolutionStrategy::fixed_point strategy is selected.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, KINSOL can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.

    Definition at line 512 of file kinsol.h.

    @@ -452,14 +452,14 @@

    A function object that users may supply and that is intended to prepare the linear solver for subsequent calls to solve_with_jacobian().

    The job of setup_jacobian() is to prepare the linear solver for subsequent calls to solve_with_jacobian(), in the solution of linear systems $Ax = b$. The exact nature of this system depends on the SolutionStrategy that has been selected.

In the cases strategy = SolutionStrategy::newton or SolutionStrategy::linesearch, $A$ is the Jacobian $J = \partial F/\partial u$. If strategy = SolutionStrategy::picard, $A$ is the approximate Jacobian matrix $L$. If strategy = SolutionStrategy::fixed_point, then linear systems do not arise, and this function is never called.

    The setup_jacobian() function may call a user-supplied function, or a function within the linear solver group, to compute Jacobian-related data that is required by the linear solver. It may also preprocess that data as needed for solve_with_jacobian(), which may involve calling a generic function (such as for LU factorization) or, more generally, build preconditioners from the assembled Jacobian. In any case, the data so generated may then be used whenever a linear system is solved.

    The point of this function is that setup_jacobian() function is not called at every Newton iteration, but only as frequently as the solver determines that it is appropriate to perform the setup task. In this way, Jacobian-related data generated by setup_jacobian() is expected to be used over a number of Newton iterations. KINSOL determines itself when it is beneficial to regenerate the Jacobian and associated information (such as preconditioners computed for the Jacobian), thereby saving the effort to regenerate the Jacobian matrix and a preconditioner for it whenever possible.

Parameters:
• current_u: Current value of $u$
• current_f: Current value of $F(u)$ or $G(u)$
    @@ -484,12 +484,12 @@

    A function object that users may supply and that is intended to solve a linear system with the Jacobian matrix. This function will be called by KINSOL (possibly several times) after setup_jacobian() has been called at least once. KINSOL tries to do its best to call setup_jacobian() the minimum number of times. If convergence can be achieved without updating the Jacobian, then KINSOL does not call setup_jacobian() again. If, on the contrary, internal KINSOL convergence tests fail, then KINSOL calls setup_jacobian() again with updated vectors and coefficients so that successive calls to solve_with_jacobian() lead to better convergence in the Newton process.

    If you do not specify a solve_with_jacobian function, then only a fixed point iteration strategy can be used. Notice that this may not converge, or may converge very slowly.

A call to this function should store in dst the result of $J^{-1}$ applied to rhs, i.e., $J \cdot dst = rhs$. It is the user's responsibility to set up proper solvers and preconditioners inside this function (or in the setup_jacobian callback above). The function attached to this callback is also provided with a tolerance to the linear solver, indicating that it is not necessary to solve the linear system with the Jacobian matrix exactly, but only to a tolerance that KINSOL will adapt over time.

    Arguments to the function are:

Parameters:
• [in] rhs: The system right hand side to solve for.
• [out] dst: The solution of $J^{-1} \cdot rhs$.
• [in] tolerance: The tolerance with which to solve the linear system of equations.
    @@ -514,7 +514,7 @@

    A function object that users may supply and that is intended to return a vector whose components are the weights used by KINSOL to compute the vector norm of the solution. The implementation of this function is optional, and it is used only if implemented.

The intent for this scaling factor is for problems in which the different components of a solution have vastly different numerical magnitudes – typically because they have different physical units and represent different things. For example, if one were to solve a nonlinear Stokes problem, the solution vector has components that correspond to velocities and other components that correspond to pressures. These have different physical units and, depending on which units one chooses, they may have roughly comparable numerical sizes or maybe they don't. To give just one example, in simulations of flow in the Earth's interior, one has velocities on the order of maybe ten centimeters per year, and pressures up to around 100 GPa. If one expresses this in SI units, this corresponds to velocities of around $0.000,000,003=3 \times 10^{-9}$ m/s, and pressures around $10^9 \text{kg}/\text{m}/\text{s}^2$, i.e., vastly different. In such cases, computing the $l_2$ norm of a solution-type vector (e.g., the difference between the previous and the current solution) makes no sense because the norm will either be dominated by the velocity components or the pressure components. The scaling vector this function returns is intended to provide each component of the solution with a scaling factor that is generally chosen as the inverse of a "typical velocity" or "typical pressure" so that upon multiplication of a vector component by the corresponding scaling vector component, one obtains a number that is of order of magnitude of one (i.e., a reasonably small multiple of one times the typical velocity/pressure). The KINSOL manual states this as follows: "The user should supply values …".

    If no function is provided to a KINSOL object, then this is interpreted as implicitly saying that all of these scaling factors should be considered as one.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, KINSOL can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.
    @@ -536,7 +536,7 @@

    A function object that users may supply and that is intended to return a vector whose components are the weights used by KINSOL to compute the vector norm of the function evaluation away from the solution. The implementation of this function is optional, and it is used only if implemented.

The point of this function and the scaling vector it returns is similar to the one discussed above for get_solution_scaling, except that it is for a vector that scales the components of the function $F(U)$, rather than the components of $U$, when computing norms. As above, if no function is provided, then this is equivalent to using a scaling vector whose components are all equal to one.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, KINSOL can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.

    Definition at line 674 of file kinsol.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL_1_1AdditionalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL_1_1AdditionalData.html 2024-12-27 18:25:11.256890577 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL_1_1AdditionalData.html 2024-12-27 18:25:11.264890632 +0000 @@ -433,7 +433,7 @@
A scalar used as a stopping tolerance on the scaled maximum norm of the system function $F(u)$ or $G(u)$.

    If set to zero, default values provided by KINSOL will be used.

    Definition at line 366 of file kinsol.h.

    @@ -533,7 +533,7 @@
The relative error in computing $F(u)$, which is used in the difference quotient approximation to the Jacobian matrix when the user does not supply a solve_with_jacobian() function.

    If set to zero, default values provided by KINSOL will be used.

    Definition at line 410 of file kinsol.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html 2024-12-27 18:25:11.352891236 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html 2024-12-27 18:25:11.352891236 +0000 @@ -373,15 +373,15 @@

    Detailed Description

    template<typename NumberType>
    class ScaLAPACKMatrix< NumberType >

    A wrapper class around ScaLAPACK parallel dense linear algebra.

ScaLAPACK assumes that matrices are distributed according to the block-cyclic decomposition scheme. An $M$ by $N$ matrix is first decomposed into $\lceil M / MB \rceil$ by $\lceil N / NB \rceil$ blocks which are then uniformly distributed across the 2d process grid with $p q \le Np$ processes, where $p,q$ are grid dimensions and $Np$ is the total number of processes. The parameters MB and NB are referred to as row and column block size and determine the granularity of the block-cyclic distribution.

In the following the block-cyclic distribution of a $10 \times 9$ matrix onto a $3\times 3$ Cartesian process grid with block sizes $\text{MB}=\text{NB}=2$ is displayed.

    Block-Cyclic Distribution
Note that the odd number of columns of the local matrices owned by the processes P2, P5 and P8 accounts for $N=9$ not being an integral multiple of $\text{NB}=2$.

The choice of the block sizes is a compromise between a size sufficiently large for efficient local/serial BLAS and one small enough to achieve good parallel load balance.

Below we show a strong scaling example of ScaLAPACKMatrix::invert() on up to 5 nodes, each composed of two Intel Xeon 2660v2 IvyBridge sockets at 2.20GHz with 10 cores/socket. Calculations are performed on square processor grids 1x1, 2x2, 3x3, 4x4, 5x5, 6x6, 7x7, 8x8, 9x9, 10x10.

    @@ -626,7 +626,7 @@

    Constructor for a rectangular matrix with n_rows and n_cols and distributed using the grid process_grid.

The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 80 of file scalapack.cc.
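A minimal construction sketch (assuming an MPI build, and assuming the Utilities::MPI::ProcessGrid constructor that takes the communicator, the matrix dimensions and the block sizes; it chooses a suitable 2d process grid automatically):

const unsigned int n_rows = 1000, n_cols = 800;
const unsigned int block_size = 32; // a power of 2, as recommended above

auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
  MPI_COMM_WORLD, n_rows, n_cols, block_size, block_size);

// Block-cyclically distributed 1000 x 800 matrix with MB = NB = 32.
ScaLAPACKMatrix<double> A(n_rows, n_cols, grid, block_size, block_size);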

    @@ -663,7 +663,7 @@

    Constructor for a square matrix of size size, and distributed using the process grid in process_grid.

The parameter block_size is used for the block-cyclic distribution of the matrix. An identical block size is used for the rows and columns of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 105 of file scalapack.cc.

    @@ -701,7 +701,7 @@

    Constructor for a general rectangular matrix that is read from the file filename and distributed using the grid process_grid.

Loads the matrix from the file filename using HDF5. If deal.II was built without HDF5, a call to this function will cause an exception to be thrown.

The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 121 of file scalapack.cc.

    @@ -777,7 +777,7 @@

    Initialize the rectangular matrix with n_rows and n_cols and distributed using the grid process_grid.

The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 216 of file scalapack.cc.

    @@ -814,7 +814,7 @@

    Initialize the square matrix of size size and distributed using the grid process_grid.

The parameter block_size is used for the block-cyclic distribution of the matrix. An identical block size is used for the rows and columns of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 290 of file scalapack.cc.

    @@ -1058,9 +1058,9 @@
Transposing assignment: $\mathbf{A} = \mathbf{B}^T$

The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

The following alignment conditions have to be fulfilled: $MB_A=NB_B$ and $NB_A=MB_B$.

    Definition at line 980 of file scalapack.cc.

@@ -1098,13 +1098,13 @@

transpose_B   Block Sizes                Operation
false         $MB_A=MB_B$, $NB_A=NB_B$   $\mathbf{A} = a \mathbf{A} + b \mathbf{B}$
true          $MB_A=NB_B$, $NB_A=MB_B$   $\mathbf{A} = a \mathbf{A} + b \mathbf{B}^T$

The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    Definition at line 990 of file scalapack.cc.

@@ -1127,9 +1127,9 @@

Matrix-addition: $\mathbf{A} = \mathbf{A} + b\, \mathbf{B}$

The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

The following alignment conditions have to be fulfilled: $MB_A=MB_B$ and $NB_A=NB_B$.

    Definition at line 1046 of file scalapack.cc.

@@ -1152,9 +1152,9 @@

Matrix-addition: $\mathbf{A} = \mathbf{A} + b\, \mathbf{B}^T$

The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

The following alignment conditions have to be fulfilled: $MB_A=NB_B$ and $NB_A=MB_B$.

    Definition at line 1056 of file scalapack.cc.

@@ -1203,24 +1203,24 @@

transpose_A   transpose_B   Block Sizes                             Operation
false         false         $MB_A=MB_C$, $NB_A=MB_B$, $NB_B=NB_C$   $\mathbf{C} = b \mathbf{A} \cdot \mathbf{B} + c \mathbf{C}$
false         true          $MB_A=MB_C$, $NB_A=NB_B$, $MB_B=NB_C$   $\mathbf{C} = b \mathbf{A} \cdot \mathbf{B}^T + c \mathbf{C}$
true          false         $MB_A=MB_B$, $NB_A=MB_C$, $NB_B=NB_C$   $\mathbf{C} = b \mathbf{A}^T \cdot \mathbf{B} + c \mathbf{C}$
true          true          $MB_A=NB_B$, $NB_A=MB_C$, $MB_B=NB_C$   $\mathbf{C} = b \mathbf{A}^T \cdot \mathbf{B}^T + c \mathbf{C}$

It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

The matrices $\mathbf{A}$, $\mathbf{B}$ and $\mathbf{C}$ must have the same process grid.

    Definition at line 1066 of file scalapack.cc.

@@ -1249,11 +1249,11 @@

    Matrix-matrix-multiplication.

The optional parameter adding determines whether the result is stored in $\mathbf{C}$ or added to $\mathbf{C}$:

if (adding) $\mathbf{C} = \mathbf{C} + \mathbf{A} \cdot \mathbf{B}$

else $\mathbf{C} = \mathbf{A} \cdot \mathbf{B}$

It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

The following alignment conditions have to be fulfilled: $MB_A=MB_C$, $NB_A=MB_B$ and $NB_B=NB_C$.

    Definition at line 1183 of file scalapack.cc.
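A short usage sketch (assuming matrices A, B and a result matrix C that all live on the same process grid and satisfy the alignment conditions above):

A.mmult(C, B);                    // C  = A * B
A.mmult(C, B, /*adding=*/true);   // C += A * B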

@@ -1282,11 +1282,11 @@

    Matrix-matrix-multiplication using transpose of $\mathbf{A}$.

The optional parameter adding determines whether the result is stored in $\mathbf{C}$ or added to $\mathbf{C}$:

if (adding) $\mathbf{C} = \mathbf{C} + \mathbf{A}^T \cdot \mathbf{B}$

else $\mathbf{C} = \mathbf{A}^T \cdot \mathbf{B}$

It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

The following alignment conditions have to be fulfilled: $MB_A=MB_B$, $NB_A=MB_C$ and $NB_B=NB_C$.

    Definition at line 1197 of file scalapack.cc.

    @@ -1314,12 +1314,12 @@ /usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 2024-12-27 18:25:11.404891593 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 2024-12-27 18:25:11.408891620 +0000 @@ -272,7 +272,7 @@
    Vector<double> solution_1d;
We will denote the solution function described by this DoFHandler and vector object by $u_h(x)$, where $x$ is a vector with just one component and consequently is not shown in boldface. Then assume that we want this $u_h(x)$ to be used as a boundary condition for a 2d problem at the line $y=0$. Let's say that this line corresponds to boundary indicator 123. If we say that the 2d problem is associated with

DoFHandler<2> dof_handler_2d;

    then in order to evaluate the boundary conditions for this 2d problem, we would want to call VectorTools::interpolate_boundary_values() via

AffineConstraints<double> boundary_values_2d;
VectorTools::interpolate_boundary_values (dof_handler_2d,
                                          123,
                                          boundary_values_function, // constructed below
                                          boundary_values_2d);
The question here is what to use as the Function object that can be passed as third argument. It needs to be a Function<2> object, i.e., it receives a 2d input point and is supposed to return the value at that point. What we want it to do is to just take the $x$ component of the input point and evaluate the 1d solution at that point, knowing that at the boundary with indicator 123, the $y$ component of the input point must be zero. This all can be achieved via the following function object:

Functions::FEFieldFunction<1> solution_1d_as_function_object (dof_handler_1d, solution_1d);
auto boundary_evaluator
  = [&] (const Point<2> &p)
    {
      return solution_1d_as_function_object.value (Point<1>(p[0]));
    };
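The lambda can then be wrapped into the Function<2> object passed to VectorTools::interpolate_boundary_values() above (boundary_values_function is the placeholder name used in that call):

ScalarFunctionFromFunctionObject<2> boundary_values_function (boundary_evaluator);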
/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html 2024-12-27 18:25:11.448891895 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html 2024-12-27 18:25:11.456891950 +0000 @@ -379,8 +379,8 @@

    starting from initial state x.

The function compute takes two arguments indicating the values of $x$ and of the gradient $g=\nabla f(\mathbf x)=\frac{\partial f}{\partial \mathbf x}$. When called, it needs to update the gradient $g$ at the given location $x$ and return the value of the function being minimized, i.e., $f(\mathbf x)$.
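A minimal sketch for the quadratic model problem $f(\mathbf x)=\frac 12 \mathbf x\cdot\mathbf x$ (assuming the compute callback receives the gradient vector first and the evaluation point second):

SolverControl solver_control(100, 1e-10);
SolverBFGS<Vector<double>> solver(solver_control);

Vector<double> x(10); // initial guess, here the zero vector
solver.solve(
  [](Vector<double> &g, const Vector<double> &x) -> double {
    g = x;                 // gradient of f(x) = 1/2 |x|^2 is x itself
    return 0.5 * (x * x);  // function value at x
  },
  x);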

    @@ -401,7 +401,7 @@

    Connect a slot to perform a custom line-search.

Given the value of function f, the current value of unknown x, the gradient g and the search direction p, return the size $\alpha$ of the step $x \leftarrow x + \alpha p$, and update x, g and f accordingly.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html 2024-12-27 18:25:11.492892197 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html 2024-12-27 18:25:11.496892225 +0000 @@ -209,10 +209,10 @@

    Detailed Description

    template<typename VectorType = Vector<double>>
class SolverFIRE< VectorType >

FIRE (Fast Inertial Relaxation Engine) for minimization of a (potentially non-linear) objective function $E(\mathbf x)$, where $\mathbf x$ is a vector of $n$ variables ($n$ is the number of variables of the objective function). Like all other solver classes, it can work on any kind of vector and matrix as long as they satisfy certain requirements (for the requirements on matrices and vectors in order to work with this class, see the documentation of the Solver base class). The type of the solution vector must be passed as template argument, and defaults to Vector<double>.

FIRE is a damped dynamics method described in Structural Relaxation Made Simple by Bitzek et al. 2006, typically used to find stable equilibrium configurations of atomistic systems in computational material science. Starting from a given initial configuration of the atomistic system, the algorithm relies on inertia to obtain the (nearest) configuration with least potential energy.

Notation:

• The global vector of unknown variables: $\mathbf x$.
• Objective function: $E(\mathbf x)$.
• Rate of change of unknowns: $\mathbf v$.
• Gradient of the objective function w.r.t unknowns: $\mathbf g = \nabla E(\mathbf x)$.
@@ -220,15 +220,15 @@
• Initial guess of unknowns: $\mathbf x_0$.
• Time step: $\Delta t$.
Given initial values for $\Delta t$, $\alpha = \alpha_0$, $\epsilon$, $\mathbf x = \mathbf x_0$ and $\mathbf v= \mathbf 0$ along with a given mass matrix $\mathbf M$, the FIRE algorithm is as follows:

1. Calculate $\mathbf g = \nabla E(\mathbf x)$ and check for convergence ($\mathbf g \cdot \mathbf g < \epsilon^2$).
2. Update $\mathbf x$ and $\mathbf v$ using a simple (forward) Euler integration step,
   $\mathbf x = \mathbf x + \Delta t \mathbf v$,
   $\mathbf v = \mathbf v + \Delta t \mathbf M^{-1} \cdot \mathbf g$.
3. Calculate $p = \mathbf g \cdot \mathbf v$.
4. Set $\mathbf v = (1-\alpha) \mathbf v + \alpha \frac{|\mathbf v|}{|\mathbf g|} \mathbf g$.
5. If $p<0$ and the number of steps since $p$ was last negative is larger than a certain value, then increase the time step $\Delta t$ and decrease $\alpha$.
6. If $p>0$, then decrease the time step, freeze the system, i.e., set $\mathbf v = \mathbf 0$, and reset $\alpha = \alpha_0$.
7. Return to 1.
/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html 2024-12-27 18:25:11.528892444 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html 2024-12-27 18:25:11.532892472 +0000 @@ -440,7 +440,7 @@

Solve $A^Tx=b$ for $x$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html 2024-12-27 18:25:11.576892774 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html 2024-12-27 18:25:11.580892801 +0000 @@ -590,7 +590,7 @@

    The solution will be returned in place of the right hand side vector.

    Parameters
[in,out] rhs_and_solution   A vector that contains the right hand side $b$ of a linear system $Ax=b$ upon calling this function, and that contains the solution $x$ of the linear system after calling this function.
[in]     transpose          If set to true, this function solves the linear system $A^T x = b$ instead of $Ax=b$.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html 2024-12-27 18:25:11.668893406 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html 2024-12-27 18:25:11.668893406 +0000 @@ -1822,7 +1822,7 @@
Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtained by SparsityPattern::symmetrize().

    @@ -2074,7 +2074,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2184,7 +2184,7 @@

    Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

    As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

    @@ -2255,8 +2255,8 @@
Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\ columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm that is compatible with the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    @@ -2284,8 +2284,8 @@
Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm that is compatible with the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html 2024-12-27 18:25:11.740893900 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html 2024-12-27 18:25:11.744893928 +0000 @@ -1625,7 +1625,7 @@
Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtained by SparsityPattern::symmetrize().

    @@ -1970,7 +1970,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2080,7 +2080,7 @@

    Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

    As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

    @@ -2151,8 +2151,8 @@
Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\ columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm that is compatible with the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    @@ -2180,8 +2180,8 @@
Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm that is compatible with the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html 2024-12-27 18:25:11.820894449 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html 2024-12-27 18:25:11.824894477 +0000 @@ -401,8 +401,8 @@
    template<typename number>
    class SparseMIC< number >

    Implementation of the Modified Incomplete Cholesky (MIC(0)) preconditioner for symmetric matrices. This class conforms to the state and usage specification in SparseLUDecomposition.

    The decomposition

Let a symmetric, positive-definite, sparse matrix $A$ be in the form $A = D - L - L^T$, where $D$ is the diagonal part of $A$ and $-L$ is a strictly lower triangular matrix. The MIC(0) decomposition of the matrix $A$ is defined by $B = (X-L)X^{-1}(X-L^T)$, where $X$ is a diagonal matrix defined by the condition $\text{rowsum}(A) = \text{rowsum}(B)$.

    Definition at line 45 of file sparse_mic.h.

    Member Typedef Documentation

    @@ -1891,7 +1891,7 @@
Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtained by SparsityPattern::symmetrize().

    @@ -2143,7 +2143,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2253,7 +2253,7 @@

    Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

    As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

    @@ -2324,8 +2324,8 @@
Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\ columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm that is compatible with the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    @@ -2353,8 +2353,8 @@
Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm that is compatible with the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html 2024-12-27 18:25:11.908895054 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html 2024-12-27 18:25:11.916895109 +0000 @@ -1465,7 +1465,7 @@
Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtained by SparsityPattern::symmetrize().

    @@ -1825,7 +1825,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
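For example, with a mass matrix and a vector of nodal values, the $L_2$ norm of the corresponding finite element function can be obtained along these lines (a sketch; mass_matrix and u are assumed to exist):

// ||u_h||_{L2}^2 = (u, M u)
const double l2_norm = std::sqrt(mass_matrix.matrix_norm_square(u));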
@@ -1914,7 +1914,7 @@

    Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

    As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.
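A usage sketch for the default case (A and B are assumed to be initialized SparseMatrix<double> objects of compatible sizes; C owns its own, initially empty sparsity pattern object, which mmult() then resets to the pattern of the product):

SparsityPattern pattern_C;
SparseMatrix<double> C;
C.reinit(pattern_C);  // C must already be tied to a pattern object
A.mmult(C, B);        // pattern_C is rebuilt and C = A * B is stored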

    @@ -1970,8 +1970,8 @@
Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\ columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm that is compatible with the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    @@ -1991,8 +1991,8 @@
Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm that is compatible with the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)
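Both norms are available as simple member calls, e.g. (a sketch, with A an initialized SparseMatrix<double>):

const double one_norm   = A.l1_norm();     // max column sum
const double infty_norm = A.linfty_norm(); // max row sum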

/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html 2024-12-27 18:25:11.984895575 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html 2024-12-27 18:25:11.992895630 +0000 @@ -1219,7 +1219,7 @@

Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

@@ -1242,7 +1242,7 @@

Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult but takes the transposed matrix.

@@ -1265,7 +1265,7 @@

Adding Matrix-vector multiplication. Add $M*src$ to $dst$ with $M$ being this matrix.
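A short sketch of the plain and the accumulating variants (A is an initialized SparseMatrixEZ<double>; dst and src are sized to the matrix dimensions):

Vector<double> dst(A.m()), src(A.n());
// ... fill src ...
A.vmult(dst, src);      // dst  = A * src
A.vmult_add(dst, src);  // dst += A * src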

@@ -1288,7 +1288,7 @@

Adding Matrix-vector multiplication. Add $M^T*src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add but takes the transposed matrix.

@@ -1397,7 +1397,7 @@

Apply SOR preconditioning matrix to src. The result of this method is $dst = (om D - L)^{-1} src$.

@@ -1425,7 +1425,7 @@

Apply transpose SOR preconditioning matrix to src. The result of this method is $dst = (om D - U)^{-1} src$.

    @@ -1460,7 +1460,7 @@
Add the matrix A conjugated by B, that is, $B A B^T$ to this object. If the parameter transpose is true, compute $B^T A B$.

    This function requires that B has a const_iterator traversing all matrix entries and that A has a function el(i,j) for access to a specific entry.

    Definition at line 1463 of file sparse_matrix_ez.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html 2024-12-27 18:25:12.028895878 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html 2024-12-27 18:25:12.028895878 +0000 @@ -156,7 +156,7 @@

    The typical use for these iterators is to iterate over the elements of a sparse matrix or over the elements of individual rows. Note that there is no guarantee that the elements of a row are actually traversed in an order in which columns monotonically increase. See the documentation of the SparsityPattern class for more information.

    The first template argument denotes the underlying numeric type, the second the constness of the matrix.

    Since there is a specialization of this class for Constness=false, this class is for iterators to constant matrices.

Note
This class operates directly on the internal data structures of the SparsityPattern and SparseMatrix classes. As a consequence, some operations are cheap and some are not. In particular, it is cheap to access the column index and the value of an entry pointed to. On the other hand, it is expensive to access the row index (this requires $O(\log(N))$ operations for a matrix with $N$ rows). As a consequence, when you design algorithms that use these iterators, it is common practice to not loop over all elements of a sparse matrix at once, but to have an outer loop over all rows and within this loop iterate over the elements of this row. This way, you only ever need to dereference the iterator to obtain the column indices and values whereas the (expensive) lookup of the row index can be avoided by using the loop index instead.

    Definition at line 346 of file sparse_matrix.h.
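The recommended row-wise traversal from the note above looks like this sketch (matrix is assumed to be an initialized SparseMatrix<double>, and process_entry stands for arbitrary user code):

for (SparseMatrix<double>::size_type row = 0; row < matrix.m(); ++row)
  for (SparseMatrix<double>::const_iterator entry = matrix.begin(row);
       entry != matrix.end(row);
       ++entry)
    // The row index comes from the loop variable, so the expensive
    // entry->row() lookup is never needed.
    process_entry(row, entry->column(), entry->value());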

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html 2024-12-27 18:25:12.076896207 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html 2024-12-27 18:25:12.080896235 +0000 @@ -1174,7 +1174,7 @@
Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$, a diagonal matrix has bandwidth 0, and there are at most $2*q+1$ entries per row if the bandwidth is $q$. The returned quantity is sometimes called "half bandwidth" in the literature.

    Definition at line 673 of file sparsity_pattern.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 2024-12-27 18:25:12.116896482 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 2024-12-27 18:25:12.120896509 +0000 @@ -177,7 +177,7 @@

    Detailed Description

    An iterator class for walking over the elements of a sparsity pattern.

    The typical use for these iterators is to iterate over the elements of a sparsity pattern (or, since they also serve as the basis for iterating over the elements of an associated matrix, over the elements of a sparse matrix), or over the elements of individual rows. There is no guarantee that the elements of a row are actually traversed in an order in which column numbers monotonically increase. See the documentation of the SparsityPattern class for more information.

Note
This class operates directly on the internal data structures of the SparsityPattern class. As a consequence, some operations are cheap and some are not. In particular, it is cheap to access the column index of the sparsity pattern entry pointed to. On the other hand, it is expensive to access the row index (this requires $O(\log(N))$ operations for a matrix with $N$ rows). As a consequence, when you design algorithms that use these iterators, it is common practice to not loop over all elements of a sparsity pattern at once, but to have an outer loop over all rows and within this loop iterate over the elements of this row. This way, you only ever need to dereference the iterator to obtain the column indices whereas the (expensive) lookup of the row index can be avoided by using the loop index instead.

    Definition at line 279 of file sparsity_pattern.h.

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html 2024-12-27 18:25:12.160896784 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html 2024-12-27 18:25:12.164896811 +0000 @@ -219,20 +219,20 @@ class SphericalManifold< dim, spacedim >

    Manifold description for a spherical space coordinate system.

    You can use this Manifold object to describe any sphere, circle, hypersphere or hyperdisc in two or three dimensions. This manifold can be used as a co-dimension one manifold descriptor of a spherical surface embedded in a higher dimensional space, or as a co-dimension zero manifold descriptor for a body with positive volume, provided that the center of the spherical space is excluded from the domain. An example for the use of this function would be in the description of a hyper-shell or hyper-ball geometry, for example after creating a coarse mesh using GridGenerator::hyper_ball(). (However, it is worth mentioning that generating a good mesh for a disk or ball is complicated and requires additional steps. See the "Possibilities for extensions" section of step-6 for an extensive discussion of how one would construct such meshes and what one needs to do for it.)

    The two template arguments match the meaning of the two template arguments in Triangulation<dim, spacedim>, however this Manifold can be used to describe both thin and thick objects, and the behavior is identical when dim <= spacedim, i.e., the functionality of SphericalManifold<2,3> is identical to SphericalManifold<3,3>.

    While PolarManifold reflects the usual notion of polar coordinates, it may not be suitable for domains that contain either the north or south poles. Consider for instance the pair of points $x_1=(1,\pi/3,0)$ and $x_2=(1,\pi/3,\pi)$ in polar coordinates (lying on the surface of a sphere with radius one, on a parallel at height $\pi/3$). In this case connecting the points with a straight line in polar coordinates would take the long road around the globe, without passing through the north pole.

    These two points would be connected (using a PolarManifold) by the curve

\begin{align*}
   s: [0,1]  & \rightarrow &  \mathbb S^3 \\
           t & \mapsto     &  (1,\pi/3,0) + (0,0,t\pi)
\end{align*}

    This curve is not a geodesic on the sphere, and it is not how we would connect those two points. A better curve would be the one passing through the North pole:

\[
  s(t) = x_1 \cos(\alpha(t)) + \kappa \times x_1 \sin(\alpha(t)) +
  \kappa ( \kappa \cdot x_1) (1-\cos(\alpha(t))).
\]

    where $\kappa = \frac{x_1 \times x_2}{\Vert x_1 \times x_2 \Vert}$ and $\alpha(t) = t \cdot \arccos(x_1 \cdot x_2)$ for $t\in[0,1]$. Indeed, this is a geodesic, and it is the natural choice when connecting points on the surface of the sphere. In the examples above, the PolarManifold class implements the first way of connecting two points on the surface of a sphere, while SphericalManifold implements the second way, i.e., this Manifold connects points using geodesics. If more than two points are involved through a SphericalManifold::get_new_points() call, a so-called spherical average is used where the final point minimizes the weighted distance to all other points via geodesics.

    In particular, this class implements a Manifold that joins any two points in space by first projecting them onto the surface of a sphere with unit radius, then connecting them with a geodesic, and finally rescaling the final radius so that the resulting one is the weighted average of the starting radii. This Manifold is identical to PolarManifold in dimension two, while for dimension three it returns points that are more uniformly distributed on the sphere, and it is invariant with respect to rotations of the coordinate system, therefore avoiding the problems that PolarManifold has at the poles. Notice, in particular, that computing tangent vectors at the poles with a PolarManifold is not well defined, while it is perfectly fine with this class.

    For mathematical reasons, it is impossible to construct a unique map of a sphere using only geodesic curves, and therefore, using this class with MappingManifold is discouraged. If you use this Manifold to describe the geometry of a sphere, you should use MappingQ as the underlying mapping, and not MappingManifold.

    This Manifold can be used only on geometries where a ball with finite radius is removed from the center. Indeed, the center is a singular point for this manifold, and if you try to connect two points across the center, they would travel on spherical coordinates, avoiding the center.
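
    A minimal usage sketch on a 2d hyper-shell, so the singular center is excluded from the domain (recent deal.II versions already attach this manifold inside GridGenerator::hyper_shell(), but the association is shown explicitly here):

    #include <deal.II/grid/grid_generator.h>
    #include <deal.II/grid/manifold_lib.h>
    #include <deal.II/grid/tria.h>

    int main()
    {
      dealii::Triangulation<2> triangulation;
      dealii::GridGenerator::hyper_shell(triangulation,
                                         dealii::Point<2>(), // center
                                         0.5,                // inner radius
                                         1.0);               // outer radius

      // Associate manifold id 0 (used by hyper_shell) with a spherical
      // manifold so that refinement places new points on geodesics.
      triangulation.set_manifold(
        0, dealii::SphericalManifold<2>(dealii::Point<2>()));
      triangulation.refine_global(2);
    }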

    /usr/share/doc/packages/dealii/doxygen/deal.II/classStridedArrayView.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classStridedArrayView.html 2024-12-27 18:25:12.184896949 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classStridedArrayView.html 2024-12-27 18:25:12.188896976 +0000 @@ -287,7 +287,7 @@
    Return a reference to the $i$th element of the range represented by the current object.

    This function is marked as const because it does not change the view object. It may however return a reference to a non-const memory location depending on whether the template type of the class is const or not.

    This function is only allowed to be called if the underlying data is indeed stored in CPU memory.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html 2024-12-27 18:25:12.268897526 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html 2024-12-27 18:25:12.272897553 +0000 @@ -318,7 +318,7 @@

    Detailed Description

    template<int rank_, int dim, typename Number>
    class SymmetricTensor< rank_, dim, Number >

    Provide a class that stores symmetric tensors of rank 2,4,... efficiently, i.e. only store those off-diagonal elements of the full tensor that are not redundant. For example, for symmetric $2\times 2$ tensors, this would be the elements 11, 22, and 12, while the element 21 is equal to the 12 element. Within this documentation, second order symmetric tensors are denoted as bold-faced upper-case Latin letters such as $\mathbf A, \mathbf B, \dots$ or bold-faced Greek letters such as $\boldsymbol{\varepsilon}$, $\boldsymbol{\sigma}$. The Cartesian coordinates of a second-order tensor such as $\mathbf A$ are represented as $A_{ij}$ where $i,j$ are indices ranging from 0 to dim-1.

    Using this class for symmetric tensors of rank 2 has advantages over matrices in many cases since the dimension is known to the compiler as well as the location of the data. It is therefore possible to produce far more efficient code than for matrices with runtime-dependent dimension. It is also more efficient than using the more general Tensor class, since fewer elements are stored, and the class automatically makes sure that the tensor represents a symmetric object.

    For tensors of higher rank, the savings in storage are even higher. For example for the $3 \times 3 \times 3 \times 3$ tensors of rank 4, only 36 instead of the full 81 entries have to be stored. These rank 4 tensors are denoted by blackboard-style upper-case Latin letters such as $\mathbb A$ with components $\mathcal{A}_{ijkl}$.

    While the definition of a symmetric rank-2 tensor is obvious, tensors of rank 4 are considered symmetric if they are operators mapping symmetric rank-2 tensors onto symmetric rank-2 tensors. This so-called minor symmetry of the rank 4 tensor requires that for every set of four indices $i, j, k, l$, the identity $\mathcal{C}_{ijkl} = \mathcal{C}_{jikl} = \mathcal{C}_{ijlk}$ holds.
@@ -647,7 +647,7 @@

    This operator assigns a scalar to a tensor. To avoid confusion with what exactly it means to assign a scalar value to a tensor, zero is the only value allowed for d, allowing the intuitive notation $\mathbf A = 0$ to reset all elements of the tensor to zero.

    @@ -909,8 +909,8 @@
    Double contraction product between the present symmetric tensor and a tensor of rank 2. For example, if the present object is the symmetric rank-2 tensor $\mathbf{A}$ and it is multiplied by another symmetric rank-2 tensor $\mathbf{B}$, then the result is the scalar-product double contraction $\mathbf A : \mathbf B = \sum_{i,j} A_{ij} B_{ij}$. In this case, the return value evaluates to a single scalar. While it is possible to define other scalar products (and associated induced norms), this one seems to be the most appropriate one.

    If the present object is a rank-4 tensor such as $\mathbb A$, then the result is a rank-2 tensor $\mathbf C = \mathbb A : \mathbf B$, i.e., the operation contracts over the last two indices of the present object and the indices of the argument, and the result is a tensor of rank 2 ( $C_{ij} = \sum_{k,l} \mathcal{A}_{ijkl} B_{kl}$).

    Note that the multiplication operator for symmetric tensors is defined to be a double contraction over two indices, while it is defined as a single contraction over only one index for regular Tensor objects. For symmetric tensors it therefore acts in a way that is commonly denoted by a "colon multiplication" in the mathematical literature (the two dots of the colon suggesting that it is a contraction over two indices), which corresponds to a scalar product between tensors.

    It is worth pointing out that this definition of operator* between symmetric tensors is different to how the (in general non-symmetric) Tensor class defines operator*, namely as the single-contraction product over the last index of the first operand and the first index of the second operand. For the double contraction of Tensor objects, you will need to use the double_contract() function.

    To maintain at least a modicum of resemblance between the interfaces of Tensor and SymmetricTensor, there are also global functions double_contract() for symmetric tensors that then do the same work as this operator. However, rather than returning the result as a return value, they write it into the first argument to the function in the same way as the corresponding functions for the Tensor class do things.
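
    A minimal sketch of the colon product (entries chosen arbitrarily):

    #include <deal.II/base/symmetric_tensor.h>

    // operator* between two symmetric rank-2 tensors is the double
    // contraction A : B = sum_ij A_ij B_ij and yields a scalar.
    double colon_product_demo()
    {
      dealii::SymmetricTensor<2, 3> A, B;
      A[0][0] = 1.0;  A[0][1] = 2.0;   // A[1][0] == A[0][1] by symmetry
      B[0][0] = 3.0;  B[0][1] = 4.0;

      const double s = A * B; // 1*3 + 2*4 + 2*4 = 19
      return s;
    }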

    @@ -1254,7 +1254,7 @@
    The opposite of the previous function: given an index $i$ in the unrolled form of the tensor, return what set of indices $(k,l)$ (for rank-2 tensors) or $(k,l,m,n)$ (for rank-4 tensors) corresponds to it.

    @@ -1910,7 +1910,7 @@

    For the kind of arguments to this function, i.e., a symmetric rank-2 tensor of size 2, the result is (counting indices starting at one) $I_2(\mathbf A) = II(\mathbf A) = \frac 12
   \left[ (A_{11} + A_{22})^2 - (A_{11}^2+2 A_{12}^2+ A_{22}^2) \right]
   = A_{11} A_{22} - A_{12}^2$. As expected, for the $2\times 2$ symmetric tensors this function handles, this equals the determinant of the tensor. (This is so because for $2\times 2$ symmetric tensors, there really are only two invariants, so the second and third invariant are the same; the determinant is the third invariant.)

    Definition at line 2841 of file symmetric_tensor.h.

    @@ -1999,8 +1999,8 @@
    Return the eigenvalues of a symmetric $2\times 2$ tensor. The array of eigenvalues is sorted in descending order.

    For $2\times 2$ tensors, the eigenvalues of tensor $\mathbf T$ are the roots of the characteristic polynomial $0 = \lambda^2
 - \lambda\;\text{tr}\mathbf{T} + \det \mathbf{T}$ as given by $\lambda_1, \lambda_2 = \frac{1}{2} \left[ \text{tr} \mathbf{T} \pm
 \sqrt{(\text{tr} \mathbf{T})^2 - 4 \det \mathbf{T}} \right]$.

    Warning
    The algorithm employed here determines the eigenvalues by computing the roots of the characteristic polynomial. In the case that there exists a common root (the eigenvalues are equal), the computation is subject to round-off errors of order $\sqrt{\epsilon}$. As an alternative, the eigenvectors() function provides a more robust, but costly, method to compute the eigenvalues of a symmetric tensor.
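
    A minimal sketch (tensor entries chosen arbitrarily):

    #include <deal.II/base/symmetric_tensor.h>

    #include <array>

    void eigenvalue_demo()
    {
      dealii::SymmetricTensor<2, 2> T;
      T[0][0] = 2.0;  T[1][1] = 1.0;  T[0][1] = 0.5;

      // Roots of lambda^2 - tr(T) lambda + det(T), sorted in descending order.
      const std::array<double, 2> lambda = dealii::eigenvalues(T);
      (void)lambda; // lambda[0] >= lambda[1]
    }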
    @@ -2602,7 +2602,7 @@
    Compute the scalar product $\mathbf A: \mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. In the current case where both arguments are symmetric tensors, this is equivalent to calling the expression A*B which uses SymmetricTensor::operator*().

    Definition at line 3735 of file symmetric_tensor.h.

    @@ -2632,7 +2632,7 @@
    Compute the scalar product $\mathbf A: \mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. We don't use operator* for this operation since the product between two tensors is usually assumed to be the contraction over the last index of the first tensor and the first index of the second tensor. For example, if B is a Tensor, calling A*B (instead of scalar_product(A,B)) provides $(\mathbf A \cdot\mathbf B)_{ij}=\sum_k A_{ik}B_{kj}$.

    Definition at line 3757 of file symmetric_tensor.h.

    @@ -2662,7 +2662,7 @@
    Compute the scalar product $\mathbf A:\mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. We don't use operator* for this operation since the product between two tensors is usually assumed to be the contraction over the last index of the first tensor and the first index of the second tensor. For example, if A is a Tensor, calling A*B (instead of scalar_product(A,B)) provides $(\mathbf A \cdot\mathbf B)_{ij}=\sum_k A_{ik}B_{kj}$.

    Definition at line 3784 of file symmetric_tensor.h.

    @@ -2968,13 +2968,13 @@
    The dot product (single contraction) for tensors: Return a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

\[
  \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
  = \sum_{k}
    \text{left}_{i_1,\ldots,i_{r1}, k}
    \text{right}_{k, j_1,\ldots,j_{r2}}
\]

    Note
    As one operand is a Tensor, the multiplication operator only performs a contraction over a single pair of indices. This is in contrast to the multiplication operator for SymmetricTensor, which does the double contraction.
    @@ -3006,13 +3006,13 @@
    The dot product (single contraction) for tensors: Return a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

\[
  \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
  = \sum_{k}
    \text{left}_{i_1,\ldots,i_{r1}, k}
    \text{right}_{k, j_1,\ldots,j_{r2}}
\]

    Note
    As one operand is a Tensor, the multiplication operator only performs a contraction over a single pair of indices. This is in contrast to the multiplication operator for SymmetricTensor, which does the double contraction.
    @@ -3159,7 +3159,7 @@ Initial value:

    An integer denoting the number of independent components that fully describe a symmetric tensor. In $d$ space dimensions, this number equals $\frac 12 (d^2+d)$ for symmetric tensors of rank 2.

    Definition at line 743 of file symmetric_tensor.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html 2024-12-27 18:25:12.320897883 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html 2024-12-27 18:25:12.324897910 +0000 @@ -245,7 +245,7 @@

    In some way, this class is similar to the Tensor class, in that it templatizes on the number of dimensions. However, there are two major differences. The first is that the Tensor class stores only numeric values (as doubles), while the Table class stores arbitrary objects. The second is that the Tensor class has fixed sizes in each dimension, also given as a template argument, while this class can handle arbitrary and different sizes in each dimension.

    This has two consequences. First, since the size is not known at compile time, it has to do explicit memory allocation. Second, the layout of individual elements is not known at compile time, so access is slower than for the Tensor class where the number of elements and their location are known at compile time and the compiler can optimize with this knowledge (for example when unrolling loops). On the other hand, this class is of course more flexible, for example when you want a two-dimensional table with the number of rows equal to the number of degrees of freedom on a cell, and the number of columns equal to the number of quadrature points. Both numbers may only be known at run-time, so a flexible table is needed here. Furthermore, you may want to store, say, the gradients of shape functions, so the data type is not a single scalar value, but a tensor itself.

    Dealing with large data sets

    The Table classes (derived from this class) are frequently used to store large data tables. A modest example is given in step-53 where we store a $380 \times 220$ table of geographic elevation data for a region of Africa, and this data requires about 670 kB of memory; however, tables that store three- or more-dimensional data (say, information about the density, pressure, and temperature in the earth's interior on a regular grid of (latitude, longitude, depth) points) can easily run into hundreds of megabytes or more. These tables are then often provided to classes such as InterpolatedTensorProductGridData or InterpolatedUniformGridData.

    If you need to load such tables on single-processor (or multi-threaded) jobs, then there is nothing you can do about the size of these tables: The table just has to fit into memory. But, if your program is parallelized via MPI, then a typical first implementation would create a table object on every process and fill it on every MPI process by reading the data from a file. This is inefficient from two perspectives:

    • You will have a lot of processes that are all trying to read from the same file at the same time.
    • In most cases, the data stored on every process is the same, and while every process needs to be able to read from a table, it is not necessary that every process stores its own table: All MPI processes that happen to be located on the same machine might as well store only one copy and make it available to each other via shared memory; in this model, only one MPI process per machine needs to store the data, and all other processes could then access it.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classTableHandler.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTableHandler.html 2024-12-27 18:25:12.364898185 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTableHandler.html 2024-12-27 18:25:12.364898185 +0000 @@ -213,7 +213,7 @@

      Two (or more) columns may be merged into a "supercolumn" by twice (or multiple) calling add_column_to_supercolumn(), see there. Additionally there is a function to set for each column the precision of the output of numbers, and there are several functions to prescribe the format and the captions the columns are written with in tex mode.

      A detailed explanation of this class is also given in the step-13 tutorial program.

      Example

      This is a simple example demonstrating the usage of this class. The first column includes the numbers $i=1 \dots n$, the second $1^2 \dots n^2$, the third $\sqrt{1}\dots\sqrt{n}$, where the second and third columns are merged into one supercolumn with the superkey squares and roots. Additionally the first column is aligned to the right (the default was centered) and the precision of the square roots is set to be 6 (instead of 4 as default).

      for (unsigned int i = 1; i <= n; ++i)
      {
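
      The snippet above is cut off by the diff context; the following is a hedged reconstruction of the full example (the column keys and formatting calls are assumptions based on the surrounding description):

      #include <deal.II/base/table_handler.h>

      #include <cmath>
      #include <iostream>

      void fill_table(const unsigned int n)
      {
        dealii::TableHandler table;
        for (unsigned int i = 1; i <= n; ++i)
          {
            table.add_value("numbers", i);
            table.add_value("squares", i * i);
            table.add_value("square roots", std::sqrt(1. * i));
          }
        // Merge the last two columns into the supercolumn "squares and
        // roots", right-align the first column, and print the roots with
        // 6 digits of precision.
        table.add_column_to_supercolumn("squares", "squares and roots");
        table.add_column_to_supercolumn("square roots", "squares and roots");
        table.set_tex_format("numbers", "r");
        table.set_precision("square roots", 6);
        table.write_text(std::cout);
      }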
      @@ -244,9 +244,9 @@

      When generating output, TableHandler expects that all columns have the exact same number of elements in it so that the result is in fact a table. This assumes that in each of the iterations (time steps, nonlinear iterations, etc) you fill every single column. On the other hand, this may not always be what you want to do. For example, it could be that the function that computes the nonlinear residual is only called every few time steps; or, a function computing statistics of the mesh is only called whenever the mesh is in fact refined. In these cases, the add_value() function will be called less often for some columns and the column would therefore have fewer elements; furthermore, these elements would not be aligned with the rows that contain the other data elements that were produced during this iteration. An entirely different scenario is that the table is filled and at a later time we use the data in there to compute the elements of other rows; the ConvergenceTable class does something like this.

      To support both scenarios, the TableHandler class has a property called auto-fill mode. By default, auto-fill mode is off, but it can be enabled by calling set_auto_fill_mode(). If auto-fill mode is enabled we use the following algorithm:

      • When calling add_value(key, value), we count the number of elements in the column corresponding to key. Let's call this number $m$.
      • We also determine the maximal number of elements in the other columns; call it $n$.
      • If $m < n-1$ then we add $n-m-1$ copies of the object T() to this column. Here, T is the data type of the given value. For example, if T is a numeric type, then T() is the number zero; if T is std::string, then T() is the empty string "".
      • Add the given value to this column.

      Padding the column with default elements makes sure that after the addition the column has as many entries as the longest other column. In other words, if we have skipped previous invocations of add_value() for a given key, then the padding will enter default values into this column.
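
      A minimal sketch of auto-fill mode (column keys hypothetical): a column that is only filled every other step is padded with T(), here the number zero:

      #include <deal.II/base/table_handler.h>

      #include <iostream>

      int main()
      {
        dealii::TableHandler table;
        table.set_auto_fill_mode(true);

        for (unsigned int step = 0; step < 4; ++step)
          {
            table.add_value("step", step);
            if (step % 2 == 0) // residual only computed every other step
              table.add_value("residual", 1.0 / (step + 1));
          }
        table.write_text(std::cout); // skipped rows in "residual" show as 0
      }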

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTensor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTensor.html 2024-12-27 18:25:12.428898624 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTensor.html 2024-12-27 18:25:12.432898652 +0000 @@ -281,13 +281,13 @@ class Tensor< rank_, dim, Number >

      A general tensor class with an arbitrary rank, i.e. with an arbitrary number of indices. The Tensor class provides an indexing operator and a bit of infrastructure, but most functionality is recursively handed down to tensors of rank 1 or put into external templated functions, e.g. the contract family.

      The rank of a tensor specifies which types of physical quantities it can represent:

      • A rank-0 tensor is a scalar that can store quantities such as temperature or pressure. These scalar quantities are shown in this documentation as simple lower-case Latin letters e.g. $a, b, c, \dots$.
      • A rank-1 tensor is a vector with dim components and it can represent vector quantities such as velocity, displacement, electric field, etc. They can also describe the gradient of a scalar field. The notation used for rank-1 tensors is bold-faced lower-case Latin letters e.g. $\mathbf a, \mathbf b, \mathbf c, \dots$. The components of a rank-1 tensor such as $\mathbf a$ are represented as $a_i$ where $i$ is an index between 0 and dim-1.
      • A rank-2 tensor is a linear operator that can transform a vector into another vector. These tensors are similar to matrices with $\text{dim} \times \text{dim}$ components. There is a related class SymmetricTensor<2,dim> for tensors of rank 2 whose elements are symmetric. Rank-2 tensors are usually denoted by bold-faced upper-case Latin letters such as $\mathbf A, \mathbf B, \dots$ or bold-faced Greek letters for example $\boldsymbol{\varepsilon}, \boldsymbol{\sigma}$. The components of a rank 2 tensor such as $\mathbf A$ are shown with two indices $(i,j)$ as $A_{ij}$. These tensors usually describe the gradients of vector fields (deformation gradient, velocity gradient, etc.) or Hessians of scalar fields. Additionally, mechanical stress tensors are rank-2 tensors that map the unit normal vectors of internal surfaces into local traction (force per unit area) vectors.
      • Tensors with ranks higher than 2 are similarly defined in a consistent manner. They have $\text{dim}^{\text{rank}}$ components and the number of indices required to identify a component equals rank. For rank-4 tensors, a symmetric variant called SymmetricTensor<4,dim> exists.

      Using this tensor class for objects of rank 2 has advantages over matrices in many cases since the dimension is known to the compiler as well as the location of the data. It is therefore possible to produce far more efficient code than for matrices with runtime-dependent dimension. It also makes the code easier to read because of the semantic difference between a tensor (an object that relates to a coordinate system and has transformation properties with regard to coordinate rotations and transforms) and matrices (which we consider as operators on arbitrary vector spaces related to linear algebra things).

      Template Parameters
      @@ -1227,7 +1227,7 @@
      Return an unrolled index in the range $[0,\text{dim}^{\text{rank}}-1]$ for the element of the tensor indexed by the argument to the function.

      @@ -1255,7 +1255,7 @@
      Opposite of component_to_unrolled_index: For an index in the range $[0, \text{dim}^{\text{rank}}-1]$, return which set of indices it would correspond to.

      @@ -1891,11 +1891,11 @@

      Entrywise multiplication of two tensor objects of general rank.

      This multiplication is also called "Hadamard-product" (c.f. https://en.wikipedia.org/wiki/Hadamard_product_(matrices)), and generates a new tensor of size <rank, dim>:

\[
  \text{result}_{i, j}
  = \text{left}_{i, j}\circ
    \text{right}_{i, j}
\]
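
      A minimal sketch of the entrywise product:

      #include <deal.II/base/tensor.h>

      void hadamard_demo()
      {
        dealii::Tensor<2, 2> left, right;
        left[0][0] = 1.0;  left[1][1] = 2.0;
        right[0][0] = 3.0; right[1][1] = 4.0;

        // result_ij = left_ij * right_ij, entry by entry.
        const dealii::Tensor<2, 2> result = dealii::schur_product(left, right);
        (void)result; // result[0][0] == 3, result[1][1] == 8, rest zero
      }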

      Template Parameters
      @@ -1934,17 +1934,17 @@
      The dot product (single contraction) for tensors. This function returns a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

\[
  \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
  = \sum_{k}
    \text{left}_{i_1,\ldots,i_{r1}, k}
    \text{right}_{k, j_1,\ldots,j_{r2}}
\]

      Note
      For the Tensor class, the multiplication operator only performs a contraction over a single pair of indices. This is in contrast to the multiplication operator for SymmetricTensor, for which the corresponding operator*() performs a double contraction. The origin of the difference in how operator*() is implemented between Tensor and SymmetricTensor is that for the former, the product between two Tensor objects of same rank and dimension results in another Tensor object – that it, operator*() corresponds to the multiplicative group action within the group of tensors. On the other hand, there is no corresponding multiplicative group action with the set of symmetric tensors because, in general, the product of two symmetric tensors is a nonsymmetric tensor. As a consequence, for a mathematician, it is clear that operator*() for symmetric tensors must have a different meaning: namely the dot or scalar product that maps two symmetric tensors of rank 2 to a scalar. This corresponds to the double-dot (colon) operator whose meaning is then extended to the product of any two even-ranked symmetric tensors.
      In case the contraction yields a tensor of rank 0, that is, if rank_1==rank_2==1, then a scalar number is returned as an unwrapped number type.

      Return the $l_1$ norm of the given rank-2 tensor, where $\|\mathbf T\|_1 = \max_j \sum_i |T_{ij}|$ (maximum of the sums over columns).

      Definition at line 3039 of file tensor.h.

      @@ -1974,7 +1974,7 @@
      Return the $l_\infty$ norm of the given rank-2 tensor, where $\|\mathbf T\|_\infty = \max_i \sum_j |T_{ij}|$ (maximum of the sums over rows).

      Definition at line 3065 of file tensor.h.
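
      A minimal sketch of both norms (entries chosen arbitrarily):

      #include <deal.II/base/tensor.h>

      void norm_demo()
      {
        dealii::Tensor<2, 2> T;
        T[0][0] = 1.0;  T[0][1] = -2.0;
        T[1][0] = 3.0;  T[1][1] =  4.0;

        const double n1   = dealii::l1_norm(T);     // max column sum: 2+4 = 6
        const double ninf = dealii::linfty_norm(T); // max row sum:    3+4 = 7
        (void)n1;
        (void)ninf;
      }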

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html 2024-12-27 18:25:12.484899009 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html 2024-12-27 18:25:12.488899036 +0000 @@ -233,7 +233,7 @@

      Detailed Description

      template<int dim, int dim_A, int spacedim_A, int chartdim_A, int dim_B, int spacedim_B, int chartdim_B>
      class TensorProductManifold< dim, dim_A, spacedim_A, chartdim_A, dim_B, spacedim_B, chartdim_B >

      Tensor product manifold of two ChartManifolds.

      This manifold will combine the ChartManifolds A and B given in the constructor to form a new ChartManifold by building the tensor product $A\otimes B$. The first spacedim_A dimensions in the real space and the first chartdim_A dimensions of the chart will be given by manifold A, while the remaining coordinates are given by B. The manifold is to be used by a Triangulation<dim, space_dim_A+space_dim_B>.

      An example usage would be the combination of a SphericalManifold with space dimension 2 and a FlatManifold with space dimension 1 to form a cylindrical manifold.

      pull_back(), push_forward(), and push_forward_gradient() are implemented by splitting the input argument into inputs for A and B according to the given dimensions and applying the corresponding operations before concatenating the result.

      Note
      The dimension arguments dim_A and dim_B are not used.
      @@ -605,24 +605,24 @@
      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

      For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
  \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
\\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                           -F^{-1}(\mathbf x_1)\right]
\end{align*}

      In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
  \mathbf s(t) &= F(\zeta(t))
\\          &= F(\xi_1 +  t (\xi_2-\xi_1))
\\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                    -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

      What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
  \mathbf s'(0) &=
    \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                       + t\left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right]\right)
               \right|_{t=0}
\\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                   \left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right].
\end{align*}

      This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

      Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html 2024-12-27 18:25:12.524899283 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html 2024-12-27 18:25:12.528899311 +0000 @@ -174,7 +174,7 @@

      in 3d. The typical application setting is a discretization of the Laplacian $L$ on a Cartesian (axis-aligned) geometry, where it can be exactly represented by the Kronecker or tensor product of a 1d mass matrix $M$ and a 1d Laplace matrix $A$ in each tensor direction (due to symmetry $M$ and $A$ are the same in each dimension). The dimension of the resulting class is the product of the one-dimensional matrices.

      This class implements two basic operations, namely the usual multiplication by a vector and the inverse. For both operations, fast tensorial techniques can be applied that implement the operator evaluation in $\text{size}(M)^{d+1}$ arithmetic operations, considerably less than $\text{size}(M)^{2d}$ for the naive forward transformation and $\text{size}(M)^{3d}$ for setting up the inverse of $L$.
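
      A hedged sketch of these two operations (the single-matrix reinit(), which reuses the same 1d matrices in every direction, and the ArrayView-based vmult()/apply_inverse() calls are assumptions based on this class's interface):

      #include <deal.II/base/array_view.h>
      #include <deal.II/lac/full_matrix.h>
      #include <deal.II/lac/tensor_product_matrix.h>
      #include <deal.II/lac/vector.h>

      void fast_diagonalization_demo()
      {
        const unsigned int n = 4;           // size of the 1d matrices
        dealii::FullMatrix<double> M(n, n); // 1d mass matrix
        dealii::FullMatrix<double> A(n, n); // 1d Laplace matrix
        // ... fill M and A (identical in each tensor direction) ...

        dealii::TensorProductMatrixSymmetricSum<2, double> L;
        L.reinit(M, A);

        dealii::Vector<double> src(n * n), dst(n * n);
        src = 1.0;
        L.vmult(dealii::make_array_view(dst), dealii::make_array_view(src));
        // Exact inverse in O(size(M)^{d+1}) operations (fast diagonalization):
        L.apply_inverse(dealii::make_array_view(src),
                        dealii::make_array_view(dst));
      }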

      Interestingly, the exact inverse of the matrix $L$ can be found through tensor products due to 1964's work by Lynch et al. [Lynch1964],

      \begin{align*}
/usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html	2024-12-27 18:25:12.584899695 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html	2024-12-27 18:25:12.580899668 +0000
@@ -236,7 +236,7 @@

      Detailed Description

      template<int dim>
      class TorusManifold< dim >

      Manifold description for the surface of a Torus in three dimensions. The Torus is assumed to be in the x-z plane. The reference coordinate system is given by the angle $\phi$ around the y axis, the angle $\theta$ around the centerline of the torus, and the distance to the centerline $w$ (between 0 and 1).

      This class was developed to be used in conjunction with GridGenerator::torus.

      Definition at line 861 of file manifold_lib.h.
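
      A minimal sketch of the intended use via GridGenerator::torus(), which creates the surface mesh and attaches this manifold:

      #include <deal.II/grid/grid_generator.h>
      #include <deal.II/grid/tria.h>

      int main()
      {
        // 2d surface of a torus embedded in 3d: centerline radius R = 2,
        // distance of the surface from the centerline r = 0.5.
        dealii::Triangulation<2, 3> surface;
        dealii::GridGenerator::torus(surface, 2.0, 0.5);
        surface.refine_global(2);
      }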

      @@ -702,7 +702,7 @@
      x1: The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.

      Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

      This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

      Refer to the general documentation of this class for more information.

      @@ -732,24 +732,24 @@
      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

      For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
  \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
\\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                           -F^{-1}(\mathbf x_1)\right]
\end{align*}

      In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
  \mathbf s(t) &= F(\zeta(t))
\\          &= F(\xi_1 +  t (\xi_2-\xi_1))
\\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                    -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

      What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
  \mathbf s'(0) &=
    \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                       + t\left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right]\right)
               \right|_{t=0}
\\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                   \left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right].
\end{align*}

      This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

      Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html 2024-12-27 18:25:12.628899997 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html 2024-12-27 18:25:12.636900052 +0000 @@ -221,12 +221,12 @@
      template<int dim, int spacedim = dim>
      class TransfiniteInterpolationManifold< dim, spacedim >

      A mapping class that extends curved boundary descriptions into the interior of the computational domain. The outer curved boundary description is assumed to be given by another manifold (e.g. a polar manifold on a circle). The mechanism to extend the boundary information is a so-called transfinite interpolation. The use of this class is discussed extensively in step-65.

      The formula for extending such a description in 2d is, for example, described on Wikipedia. Given a point $(u,v)$ on the chart, the image of this point in real space is given by

\begin{align*}
 \mathbf S(u,v) &= (1-v)\mathbf c_0(u)+v \mathbf c_1(u) + (1-u)\mathbf c_2(v)
 + u \mathbf c_3(v) \\
 &\quad - \left[(1-u)(1-v) \mathbf x_0 + u(1-v) \mathbf x_1 + (1-u)v \mathbf
 x_2 + uv \mathbf x_3 \right]
\end{align*}

      where $\bf x_0, \bf x_1, \bf x_2, \bf x_3$ denote the four bounding vertices bounding the image space and $\bf c_0, \bf c_1, \bf c_2, \bf c_3$ are the four curves describing the lines of the cell. If a curved manifold is attached to any of these lines, the evaluation is done according to Manifold::get_new_point() with the two end points of the line and appropriate weight. In 3d, the generalization of this formula is implemented, creating a weighted sum of the vertices (positive contribution), the lines (negative), and the faces (positive contribution).

      This manifold is usually attached to a coarse mesh and then places new points as a combination of the descriptions on the boundaries, weighted appropriately according to the position of the point in the original chart coordinates $(u,v)$. This manifold should be preferred over setting only a curved manifold on the boundary of a mesh in most situations as it yields more uniform mesh distributions as the mesh is refined because it switches from a curved description to a straight description over all children of the initial coarse cell this manifold was attached to. This way, the curved nature of the manifold that is originally contained in one coarse mesh layer will be applied to more than one fine mesh layer once the mesh gets refined. Note that the mechanisms of TransfiniteInterpolationManifold are also built into the MappingQ class when only a surface of a cell is subject to a curved description, ensuring that even the default case without this manifold gets optimal convergence rates when applying curved boundary descriptions.
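
      A minimal sketch of the usual setup on a coarse disk mesh (see step-65 for the full discussion):

      #include <deal.II/grid/grid_generator.h>
      #include <deal.II/grid/manifold_lib.h>
      #include <deal.II/grid/tria.h>

      int main()
      {
        dealii::Triangulation<2> triangulation;
        dealii::GridGenerator::hyper_ball(triangulation);

        // Keep the curved (spherical) description on the boundary (id 0)
        // and mark the interior for transfinite interpolation (id 1).
        triangulation.set_all_manifold_ids(1);
        triangulation.set_all_manifold_ids_on_boundary(0);

        dealii::TransfiniteInterpolationManifold<2> transfinite;
        transfinite.initialize(triangulation); // must see the coarse mesh
        triangulation.set_manifold(1, transfinite);

        triangulation.refine_global(3);
      }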

      @@ -932,11 +932,11 @@
      x1    The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.

      Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. The geodesic is the shortest line between these two points, where "shortest" is defined via a metric specific to a particular implementation of this class in a derived class. For example, in the case of a FlatManifold, the shortest line between two points is just the straight line, and in this case the tangent vector is just the difference $\mathbf d=\mathbf x_2-\mathbf x_1$. On the other hand, for a manifold that describes a surface embedded in a higher dimensional space (e.g., the surface of a sphere), the tangent vector is tangential to the surface, and consequently may point in a different direction than the straight line that connects the two points.

      While tangent vectors are often normalized to unit length, the vectors returned by this function are normalized as described in the introduction of this class. Specifically, if $\mathbf s(t)$ traces out the geodesic between the two points where $\mathbf x_1 = \mathbf s(0)$ and $\mathbf x_2 = \mathbf s(1)$, then the returned vector must equal $\mathbf s'(0)$. In other words, the norm of the returned vector also encodes, in some sense, the length of the geodesic because a curve $\mathbf s(t)$ must move "faster" if the two points it connects between arguments $t=0$ and $t=1$ are farther apart.

      The default implementation of this function approximates $\mathbf s'(0) \approx \frac{\mathbf s(\epsilon)-\mathbf x_1}{\epsilon}$ for a small value of $\epsilon$, and the evaluation of $\mathbf s(\epsilon)$ is done by calling get_new_point(). If possible, derived classes should override this function by an implementation of the exact derivative.
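
      As a small illustration (a sketch, assuming a unit sphere centered at the origin; includes omitted):

      void tangent_on_sphere()
      {
        const SphericalManifold<3> sphere; // centered at the origin by default
        const Point<3> x1(1., 0., 0.), x2(0., 1., 0.);
        const Tensor<1, 3> d = sphere.get_tangent_vector(x1, x2);
        // d is tangential to the sphere at x1; its norm approximates the
        // geodesic length pi/2 between the two points.
        (void)d;
      }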

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html 2024-12-27 18:25:12.704900519 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html 2024-12-27 18:25:12.708900547 +0000 @@ -338,7 +338,7 @@
      x1    The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.

      Detailed Description

      template<int structdim, int dim, int spacedim>
      class TriaAccessor< structdim, dim, spacedim >

      A class that provides access to objects in a triangulation such as its vertices, sub-objects, children, geometric information, etc. This class represents objects of dimension structdim (i.e. 1 for lines, 2 for quads, 3 for hexes) in a triangulation of dimensionality dim (i.e. 1 for a triangulation of lines, 2 for a triangulation of quads, and 3 for a triangulation of hexes) that is embedded in a space of dimensionality spacedim (for spacedim==dim the triangulation represents a domain in ${\mathbb R}^\text{dim}$, for spacedim>dim the triangulation is of a manifold embedded in a higher dimensional space).

      There is a specialization of this class for the case where structdim equals zero, i.e., for vertices of a triangulation.

      Definition at line 756 of file tria_accessor.h.
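
      A brief sketch of how the three template arguments show up in user code, assuming a surface mesh as an example (includes omitted):

      // dim == 2 surface triangulation embedded in spacedim == 3.
      void print_face_centers(const Triangulation<2, 3> &surface)
      {
        // Cells are reached through accessors derived from TriaAccessor<2, 2, 3>;
        // their bounding lines are TriaAccessor<1, 2, 3> objects.
        for (const auto &cell : surface.active_cell_iterators())
          for (unsigned int f = 0; f < cell->n_faces(); ++f)
            std::cout << cell->face(f)->center() << std::endl;
      }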

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html 2024-12-27 18:25:12.760900904 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html 2024-12-27 18:25:12.760900904 +0000 @@ -291,7 +291,7 @@ static unsigned int quad_index (const unsigned int i)

      Detailed Description

      template<int spacedim>
      class TriaAccessor< 0, 1, spacedim >

      This class is a specialization of TriaAccessor<structdim, dim, spacedim> for the case that structdim is zero and dim is one. This class represents vertices in a one-dimensional triangulation that is embedded in a space of dimensionality spacedim (for spacedim==dim==1 the triangulation represents a domain in ${\mathbb R}^\text{dim}$, for spacedim>dim==1 the triangulation is of a manifold embedded in a higher dimensional space).

      The current specialization of the TriaAccessor<0,dim,spacedim> class for vertices of a one-dimensional triangulation exists since in the dim == 1 case vertices are also faces.

      Definition at line 2319 of file tria_accessor.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html 2024-12-27 18:25:12.808901233 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html 2024-12-27 18:25:12.808901233 +0000 @@ -242,7 +242,7 @@ static unsigned int quad_index (const unsigned int i)

      Detailed Description

      template<int dim, int spacedim>
      class TriaAccessor< 0, dim, spacedim >

      This class is a specialization of TriaAccessor<structdim, dim, spacedim> for the case that structdim is zero. This class represents vertices in a triangulation of dimensionality dim (i.e. 1 for a triangulation of lines, 2 for a triangulation of quads, and 3 for a triangulation of hexes) that is embedded in a space of dimensionality spacedim (for spacedim==dim the triangulation represents a domain in ${\mathbb R}^\text{dim}$, for spacedim>dim the triangulation is of a manifold embedded in a higher dimensional space).

      There is a further specialization of this class for the case that dim equals one, i.e., for vertices of a one-dimensional triangulation, since in that case vertices are also faces.

      Definition at line 1907 of file tria_accessor.h.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html 2024-12-27 18:25:12.960902277 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html 2024-12-27 18:25:12.968902332 +0000 @@ -1905,7 +1905,7 @@
      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
      This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1). See the section on signals in the general documentation of this class.
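
      A minimal 2d sketch of the cell-count arithmetic (includes omitted):

      void refine_unit_square()
      {
        Triangulation<2> tria;
        GridGenerator::hyper_cube(tria); // one coarse cell
        tria.refine_global(3);           // three cycles: 1 -> 4 -> 16 -> 64 cells
        // tria.n_active_cells() now returns (2^2)^3 = 64
      }
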
      @@ -4880,7 +4880,7 @@
      Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not the cells of the triangulation, i.e., for $dim>2$, no level argument may be given.

      Note
      The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html 2024-12-27 18:25:13.052902909 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html 2024-12-27 18:25:13.060902964 +0000 @@ -1016,7 +1016,7 @@
      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix. The vector types can be block vectors or non-block vectors (only if the matrix has only one row or column, respectively), and need to define TrilinosWrappers::SparseMatrix::vmult.

      Definition at line 443 of file trilinos_block_sparse_matrix.h.

      @@ -1046,7 +1046,7 @@
      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      Definition at line 456 of file trilinos_block_sparse_matrix.h.
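
      A short sketch of both operations; the assembled block matrix and compatibly partitioned block vectors are assumptions (includes omitted):

      // y = M x and z = M^T x for an already assembled block matrix.
      void multiply(const TrilinosWrappers::BlockSparseMatrix &M,
                    const TrilinosWrappers::MPI::BlockVector  &x,
                    TrilinosWrappers::MPI::BlockVector        &y,
                    TrilinosWrappers::MPI::BlockVector        &z)
      {
        M.vmult(y, x);  // y = M x
        M.Tvmult(z, x); // z = M^T x (z needs the transposed block structure)
      }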

      @@ -2132,7 +2132,7 @@
      Adding matrix-vector multiplication: add $M*src$ to $dst$, with $M$ being this matrix.

      @@ -2634,7 +2634,7 @@
      Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

      Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

      @@ -2742,7 +2742,7 @@
      Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

      Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html 2024-12-27 18:25:13.124903403 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html 2024-12-27 18:25:13.128903431 +0000 @@ -1739,7 +1739,7 @@
      $U = U * V$: scalar product.

      @@ -1765,7 +1765,7 @@
      Return the square of the $l_2$-norm.

      @@ -1817,7 +1817,7 @@
      Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

      @@ -1843,7 +1843,7 @@
      Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

      @@ -1869,7 +1869,7 @@
      Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.
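
      In code, a sketch of the norm family (v is assumed to be an initialized TrilinosWrappers::MPI::BlockVector; includes omitted):

      void print_norms(const TrilinosWrappers::MPI::BlockVector &v)
      {
        std::cout << v.l1_norm() << ' '     // sum of absolute values
                  << v.l2_norm() << ' '     // sqrt of the sum of squares
                  << v.norm_sqr() << ' '    // square of the l2-norm
                  << v.linfty_norm()        // maximum absolute value
                  << std::endl;
      }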

      @@ -1904,7 +1904,7 @@
      return_value = *this * W;

      The reason this function exists is that this operation involves less memory transfer than calling the two functions separately on deal.II's vector classes (Vector<Number> and LinearAlgebra::distributed::Vector<double>). This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      @@ -2151,7 +2151,7 @@
      $U(0-DIM)+=s$. Addition of s to all components. Note that s is a scalar and not a vector.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html 2024-12-27 18:25:13.200903925 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html 2024-12-27 18:25:13.208903980 +0000 @@ -1323,7 +1323,7 @@
      Return the square of the $l_2$-norm.

      @@ -1395,7 +1395,7 @@
      $l_1$-norm of the vector. The sum of the absolute values.

      @@ -1413,7 +1413,7 @@
      $l_2$-norm of the vector. The square root of the sum of the squares of the elements.

      @@ -1431,7 +1431,7 @@
      $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

      @@ -1477,7 +1477,7 @@
      return_value = *this * W;

      The reason this function exists is for compatibility with deal.II's own vector classes which can implement this functionality with less memory transfer. However, for Trilinos vectors such a combined operation is not natively supported and thus the cost is completely equivalent to calling the two methods separately.

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html 2024-12-27 18:25:13.236904172 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html 2024-12-27 18:25:13.244904227 +0000 @@ -319,7 +319,7 @@
      A function object that users should supply and that is intended to compute the residual $F(u)$.

      Note
      This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. NOX can not deal with "recoverable" errors for this callback, so if it throws an exception of type RecoverableUserCallbackError, then this exception is treated like any other exception.

      Definition at line 204 of file nox.h.

      @@ -381,7 +381,7 @@
      A user function that applies the Jacobian $\nabla_u F(u)$ to x and writes the result in y. The Jacobian to be used (i.e., more precisely: the linearization point $u$ above) is the one computed when the setup_jacobian function was last called.

      Note
      This function is optional and is used in the case of certain configurations. For instance, this function is required if the polynomial line search (NOX::LineSearch::Polynomial) is chosen, whereas for the full step case (NOX::LineSearch::FullStep) it won't be called.
      This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. NOX can not deal with "recoverable" errors for this callback, so if it throws an exception of type RecoverableUserCallbackError, then this exception is treated like any other exception.
      @@ -403,7 +403,7 @@
      A user function that applies the inverse of the Jacobian $[\nabla_u F(u)]^{-1}$ to y and writes the result in x. The parameter tolerance specifies the error reduction if an iterative solver is used in applying the inverse matrix. The Jacobian to be used (i.e., more precisely: the linearization point $u$ above) is the one computed when the setup_jacobian function was last called.

      Note
      This function is optional and is used in the case of certain configurations.
      This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. NOX can deal with "recoverable" errors for this callback, if the NOX parameter "Newton/Rescue Bad Newton Solve" is set to true (which is, in fact, its default value). If this parameter is set to true, then exceptions of type RecoverableUserCallbackError are eaten for this callback and NOX can safely proceed with a recovery step. Exceptions of other types are still treated as "irrecoverable".
      @@ -425,7 +425,7 @@
      A user function that applies the inverse of the Jacobian $[\nabla_u F(u)]^{-1}$ to y, writes the result in x, and returns the number of linear iterations the linear solver needed. The parameter tolerance specifies the error reduction if an iterative solver is used. The Jacobian to be used (i.e., more precisely: the linearization point $u$ above) is the one computed when the setup_jacobian function was last called.

      Note
      This function is used if solve_with_jacobian is not provided. Its return value is compared against AdditionalData::threshold_n_linear_iterations; if it is larger, the preconditioner will be built before the next linear system is solved. The use of this approach is predicated on the idea that one can keep using a preconditioner built earlier as long as it is a good preconditioner for the matrix currently in use – where "good" is defined as leading to a number of iterations to solve linear systems less than the threshold given by the current variable.
      This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. NOX can deal with "recoverable" errors for this callback, if the NOX parameter "Newton/Rescue Bad Newton Solve" is set to true (which is, in fact, its default value). If this parameter is set to true, then exceptions of type RecoverableUserCallbackError are eaten for this callback and NOX can safely proceed with a recovery step. Exceptions of other types are still treated as "irrecoverable".
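
      Taken together, a sketch of how these callbacks are typically wired up; assemble_residual, assemble_jacobian, jacobian_matrix, and solve_linear_system are hypothetical problem-specific names, not deal.II API (includes omitted):

      // Hypothetical user routines (assumptions):
      void assemble_residual(const TrilinosWrappers::MPI::Vector &u,
                             TrilinosWrappers::MPI::Vector       &F);
      void assemble_jacobian(const TrilinosWrappers::MPI::Vector &u);
      void solve_linear_system(const TrilinosWrappers::MPI::Vector &rhs,
                               TrilinosWrappers::MPI::Vector       &x,
                               double                               tolerance);
      extern TrilinosWrappers::SparseMatrix jacobian_matrix; // assumption

      void solve_nonlinear(TrilinosWrappers::MPI::Vector &solution)
      {
        using VectorType = TrilinosWrappers::MPI::Vector;
        TrilinosWrappers::NOXSolver<VectorType>::AdditionalData data;
        TrilinosWrappers::NOXSolver<VectorType> solver(data);

        solver.residual = [&](const VectorType &u, VectorType &F) {
          assemble_residual(u, F); // compute F(u)
        };
        solver.setup_jacobian = [&](const VectorType &u) {
          assemble_jacobian(u); // store the linearization point u
        };
        solver.apply_jacobian = [&](const VectorType &x, VectorType &y) {
          jacobian_matrix.vmult(y, x); // y = J x
        };
        solver.solve_with_jacobian =
          [&](const VectorType &y, VectorType &x, const double tolerance) {
            solve_linear_system(y, x, tolerance); // x = J^{-1} y, e.g. GMRES
          };

        solver.solve(solution);
      }
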
      /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html 2024-12-27 18:25:13.320904749 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html 2024-12-27 18:25:13.324904777 +0000 @@ -2108,7 +2108,7 @@
      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      Obviously, the matrix needs to be square for this operation.

      The implementation of this function is not as efficient as the one in the SparseMatrix class used in deal.II (i.e. the original one, not the Trilinos wrapper class) since Trilinos doesn't support this operation and needs a temporary vector.

      The vector has to be initialized with the same IndexSet the matrix was initialized with.
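
      In code, a sketch (the mass matrix and a compatibly initialized vector are assumptions; includes omitted):

      TrilinosScalar l2_norm_sqr(const TrilinosWrappers::SparseMatrix &mass_matrix,
                                 const TrilinosWrappers::MPI::Vector  &v)
      {
        // (v, M v): for a mass matrix M, this is the squared L2 norm of the
        // finite element function whose nodal values are stored in v.
        return mass_matrix.matrix_norm_square(v);
      }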

      /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html 2024-12-27 18:25:13.384905189 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html 2024-12-27 18:25:13.392905243 +0000 @@ -467,7 +467,7 @@
      Generate a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally, too.

      It is possible to specify the number of column entries per row using the optional n_entries_per_row argument. However, this value does not need to be accurate or even given at all, since one usually does not have this kind of information before building the sparsity pattern (the usual case when the function DoFTools::make_sparsity_pattern() is called). The entries are allocated dynamically in a similar manner as for the deal.II DynamicSparsityPattern classes. However, a good estimate will reduce the setup time of the sparsity pattern.

      Definition at line 100 of file trilinos_sparsity_pattern.cc.

      @@ -497,7 +497,7 @@
      Generate a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally, too.

      The vector n_entries_per_row specifies the number of entries in each row (information that is usually not available at this point, though).

      Definition at line 109 of file trilinos_sparsity_pattern.cc.

      @@ -769,7 +769,7 @@
      Initialize a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally.

      The number of column entries per row is specified as the maximum number of entries argument. This does not need to be an accurate number since the entries are allocated dynamically in a similar manner as for the deal.II DynamicSparsityPattern classes, but a good estimate will reduce the setup time of the sparsity pattern.

      Definition at line 214 of file trilinos_sparsity_pattern.cc.

      @@ -799,7 +799,7 @@
      Initialize a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally.

      The vector n_entries_per_row specifies the number of entries in each row.

      Definition at line 227 of file trilinos_sparsity_pattern.cc.
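
      A usage sketch under the assumption of a small, locally stored pattern (includes omitted):

      void make_identity_pattern()
      {
        TrilinosWrappers::SparsityPattern sp(/*m=*/100, /*n=*/100,
                                             /*n_entries_per_row=*/5);
        for (unsigned int i = 0; i < 100; ++i)
          sp.add(i, i); // entries are allocated dynamically
        sp.compress();  // finalize before building a matrix
        TrilinosWrappers::SparseMatrix A;
        A.reinit(sp);
      }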

      @@ -1300,7 +1300,7 @@
      Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth an $n\times m$ matrix can have is $\max\{n-1,m-1\}$.

      Definition at line 878 of file trilinos_sparsity_pattern.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html 2024-12-27 18:25:13.448905628 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html 2024-12-27 18:25:13.448905628 +0000 @@ -323,7 +323,7 @@ const MPI_Comm communicator

      Constructor that takes the number of locally-owned degrees of freedom local_size and the number of ghost degrees of freedom ghost_size.

      The local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively.

      Note
      Setting the ghost_size variable to an appropriate value provides memory space for the ghost data in a vector's memory allocation and allows access to it via local_element(). However, the associated global indices must be handled externally in this case.
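
      A sketch of this constructor variant (sizes are illustrative; includes omitted):

      void make_partition()
      {
        // Rank p owns the contiguous global index range [100*p, 100*(p+1));
        // the 10 ghost entries are addressable only via local_element().
        const Utilities::MPI::Partitioner partitioner(/*local_size=*/100,
                                                      /*ghost_size=*/10,
                                                      MPI_COMM_WORLD);
        (void)partitioner;
      }
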
      /usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html 2024-12-27 18:25:13.480905848 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html 2024-12-27 18:25:13.480905848 +0000 @@ -238,8 +238,8 @@ const unsigned int column_block_size

      Constructor for a process grid for a given mpi_communicator. In this case the process grid is heuristically chosen based on the dimensions and block-cyclic distribution of a target matrix provided in n_rows_matrix, n_columns_matrix, row_block_size and column_block_size.

      The maximum number of MPI cores one can utilize is $\min\{\frac{M}{MB}\frac{N}{NB}, Np\}$, where $M,N$ are the matrix dimensions and $MB,NB$ are the block sizes and $Np$ is the number of processes in the mpi_communicator. This function then creates a 2d processor grid assuming the ratio between the number of process rows $p$ and columns $q$ to be equal to the ratio between the matrix dimensions $M$ and $N$.

      For example, a square matrix $640\times 640$ with block size $32$ and an mpi_communicator with 11 cores will result in a $3\times 3$ process grid.

      Definition at line 208 of file process_grid.cc.
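
      A sketch of that example in code; the ScaLAPACKMatrix client and the argument values are assumptions (includes omitted):

      void make_grid_and_matrix()
      {
        // With 11 MPI processes, this layout yields the 3x3 grid mentioned above.
        const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
          MPI_COMM_WORLD, 640, 640, 32, 32);
        ScaLAPACKMatrix<double> matrix(640, 640, grid, 32, 32);
      }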

      /usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html 2024-12-27 18:25:13.600906672 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html 2024-12-27 18:25:13.604906699 +0000 @@ -1324,7 +1324,7 @@

      Return the scalar product of two vectors. The return type is the underlying type of this vector, so the return type and the accuracy with which the result is computed depend on the order of the arguments of this vector.

      For complex vectors, the scalar product is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile). The algorithm uses pairwise summation with the same order of summation in every run, which gives fully repeatable results from one run to another.
      @@ -1345,7 +1345,7 @@
      Return the square of the $l_2$-norm.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile). The algorithm uses pairwise summation with the same order of summation in every run, which gives fully repeatable results from one run to another.
      @@ -1387,7 +1387,7 @@
      $l_1$-norm of the vector. The sum of the absolute values.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile). The algorithm uses pairwise summation with the same order of summation in every run, which gives fully repeatable results from one run to another.
      @@ -1408,7 +1408,7 @@
      $l_2$-norm of the vector. The square root of the sum of the squares of the elements.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile). The algorithm uses pairwise summation with the same order of summation in every run, which gives fully repeatable results from one run to another.
      @@ -1429,7 +1429,7 @@
      $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile). The algorithm uses pairwise summation with the same order of summation in every run, which gives fully repeatable results from one run to another.
      @@ -1480,7 +1480,7 @@
      return_value = *this * W;

      The reason this function exists is that this operation involves less memory transfer than calling the two functions separately. This method only needs to load three vectors, this, V, W, whereas calling separate methods means to load the calling vector this twice. Since most vector operations are memory transfer limited, this reduces the time by 25% (or 50% if W equals this).

      For complex-valued vectors, the scalar product in the second step is implemented as $\left<v,w\right>=\sum_i v_i \bar{w_i}$.

      Note
      If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile). The algorithm uses pairwise summation with the same order of summation in every run, which gives fully repeatable results from one run to another.
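
      A sketch of the fused operation (sizes and fill values are illustrative; includes omitted):

      void fused_update_and_dot()
      {
        Vector<double> u(1000), v(1000), w(1000);
        for (unsigned int i = 0; i < u.size(); ++i)
          {
            u(i) = 1.0; v(i) = 2.0; w(i) = 3.0;
          }
        // One pass over memory: u += 2*v, then return u*w.
        const double result = u.add_and_dot(2.0, v, w);
        // Each u(i) is now 5.0, so result == 5.0 * 3.0 * 1000 == 15000.
        (void)result;
      }
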
      /usr/share/doc/packages/dealii/doxygen/deal.II/classVectorFunctionFromScalarFunctionObject.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classVectorFunctionFromScalarFunctionObject.html 2024-12-27 18:25:13.660907084 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classVectorFunctionFromScalarFunctionObject.html 2024-12-27 18:25:13.660907084 +0000 @@ -235,7 +235,7 @@
      component_mask(&one, 1, 3);
      Here, component_mask then represents a Function object that for every point returns the vector $(0, 1, 0)^T$, i.e. a mask function that could, for example, be passed to VectorTools::integrate_difference(). This effect can also be achieved using the ComponentSelectFunction class but is obviously easily extended to functions that are non-constant in their one component.

      Definition at line 869 of file function.h.
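
      A self-contained sketch of the fragment above; the lambda standing in for the scalar function "one" is an assumption (includes omitted):

      void make_mask()
      {
        VectorFunctionFromScalarFunctionObject<2> component_mask(
          [](const Point<2> &) { return 1.0; }, // the scalar function "one"
          /*selected_component=*/1,
          /*n_components=*/3);
        Vector<double> values(3);
        component_mask.vector_value(Point<2>(0.5, 0.5), values);
        // values is now (0, 1, 0)^T
      }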

      Member Typedef Documentation

      /usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html 2024-12-27 18:25:13.720907496 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html 2024-12-27 18:25:13.724907523 +0000 @@ -1336,7 +1336,7 @@

      Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

      Note
      This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      This function is the equivalent of FiniteElement::component_mask() with the same arguments. It verifies that it gets the same result from every one of the elements that are stored in this FECollection. If this is not the case, it throws an exception.
      Parameters
      @@ -1432,7 +1432,7 @@

      Given a component mask (see this glossary entry ), produce a block mask (see this glossary entry ) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

      Note
      This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
      This function is the equivalent of FiniteElement::component_mask() with the same arguments. It verifies that it gets the same result from every one of the elements that are stored in this FECollection. If this is not the case, it throws an exception.
      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html 2024-12-27 18:25:13.748907688 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html 2024-12-27 18:25:13.752907715 +0000 @@ -179,7 +179,7 @@
      Parameters
      real_support_points    The position of the mapping support points in real space, queried by MappingQ::compute_mapping_support_points().
      unit_support_points    The location of the support points in reference coordinates $[0, 1]^d$ that map to the mapping support points in real space by a polynomial map.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html 2024-12-27 18:25:13.900908732 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html 2024-12-27 18:25:13.908908787 +0000 @@ -1857,7 +1857,7 @@

      When vertices have been moved locally, for example using code like

      cell->vertex(0) = new_location;

      then this function can be used to update the location of vertices between MPI processes.

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that the part of the information that has to be sent between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      Note
      It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
      This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell are consistent with the corresponding locations of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
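
      A sketch of the typical pattern; displacement() is a hypothetical user function, and the per-rank ownership tie-breaking described above is omitted for brevity (includes omitted):

      Tensor<1, 3> displacement(const Point<3> &p); // hypothetical user function

      void move_and_synchronize(parallel::distributed::Triangulation<3> &tria)
      {
        std::vector<bool> vertex_moved(tria.n_vertices(), false);
        for (const auto &cell : tria.active_cell_iterators())
          if (cell->is_locally_owned())
            for (const unsigned int v : cell->vertex_indices())
              if (!vertex_moved[cell->vertex_index(v)])
                {
                  cell->vertex(v) += displacement(cell->vertex(v));
                  vertex_moved[cell->vertex_index(v)] = true;
                }
        tria.communicate_locally_moved_vertices(vertex_moved);
      }
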
      @@ -2424,7 +2424,7 @@
      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
      This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1). See the section on signals in the general documentation of this class.
      @@ -7022,7 +7022,7 @@
      Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not the cells of the triangulation, i.e., for $dim>2$, no level argument may be given.

      Note
      The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html 2024-12-27 18:25:14.068909885 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html 2024-12-27 18:25:14.072909913 +0000 @@ -1766,7 +1766,7 @@

      When vertices have been moved locally, for example using code like

      cell->vertex(0) = new_location;

      then this function can be used to update the location of vertices between MPI processes.

      All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that the part of the information that has to be sent between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

      Note
      It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
      This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell are consistent with the corresponding locations of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
      @@ -2380,7 +2380,7 @@
      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
      This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1). See the section on signals in the general documentation of this class.
      @@ -6999,7 +6999,7 @@
Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e., for $dim>2$, no level argument may be given.

      Note
      The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
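For instance, the loop idiom the note alludes to might look like the following sketch (tria is assumed to be a parallel::distributed::Triangulation set up elsewhere):

for (unsigned int level = 0; level < tria.n_global_levels(); ++level)
  for (const auto &cell : tria.cell_iterators_on_level(level))
    {
      // On ranks whose local portion has fewer than n_global_levels()
      // levels, this range is simply empty for the missing levels.
    }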
      /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html 2024-12-27 18:25:14.244911094 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html 2024-12-27 18:25:14.248911121 +0000 @@ -2077,7 +2077,7 @@
Return a permutation vector for the order in which the coarse cells are handed off to p4est. That is, the value of the $i$th element in this vector is the index of the deal.II coarse cell (counting from begin(0)) that corresponds to the $i$th tree managed by p4est.

      Definition at line 3696 of file tria.cc.

      @@ -2961,7 +2961,7 @@

      When vertices have been moved locally, for example using code like

      cell->vertex(0) = new_location;

      then this function can be used to update the location of vertices between MPI processes.

All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that the part of the information that has to be sent between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported by exactly one process, since multiple processes moving the same vertex differently would lead to undesirable results. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" the vertex.

      Note
It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells that are not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that are not at least located on ghost cells.
This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell are consistent with the corresponding locations of these cells on other processors. On the other hand, the locations of the vertices of artificial cells will in general be wrong, since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that is what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and, before refining or coarsening the mesh, apply the opposite offset and call this function again.
      @@ -3420,7 +3420,7 @@
      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e., more than once if times > 1). See the section on signals in the general documentation of this class.
      @@ -7753,7 +7753,7 @@
Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e., for $dim>2$, no level argument may be given.

      Note
      The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html 2024-12-27 18:25:14.420912302 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html 2024-12-27 18:25:14.428912357 +0000 @@ -2242,7 +2242,7 @@

      When vertices have been moved locally, for example using code like

      cell->vertex(0) = new_location;

      then this function can be used to update the location of vertices between MPI processes.

All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that the part of the information that has to be sent between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported by exactly one process, since multiple processes moving the same vertex differently would lead to undesirable results. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" the vertex.

      Note
It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells that are not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that are not at least located on ghost cells.
This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell are consistent with the corresponding locations of these cells on other processors. On the other hand, the locations of the vertices of artificial cells will in general be wrong, since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that is what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and, before refining or coarsening the mesh, apply the opposite offset and call this function again.
      @@ -2813,7 +2813,7 @@
      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e., more than once if times > 1). See the section on signals in the general documentation of this class.
      @@ -7298,7 +7298,7 @@
Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e., for $dim>2$, no level argument may be given.

      Note
      The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html 2024-12-27 18:25:14.596913511 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html 2024-12-27 18:25:14.604913566 +0000 @@ -2392,7 +2392,7 @@

      When vertices have been moved locally, for example using code like

      cell->vertex(0) = new_location;

      then this function can be used to update the location of vertices between MPI processes.

All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that the part of the information that has to be sent between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported by exactly one process, since multiple processes moving the same vertex differently would lead to undesirable results. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" the vertex.

      Note
It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells that are not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that are not at least located on ghost cells.
This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell are consistent with the corresponding locations of these cells on other processors. On the other hand, the locations of the vertices of artificial cells will in general be wrong, since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that is what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and, before refining or coarsening the mesh, apply the opposite offset and call this function again.
      @@ -2896,7 +2896,7 @@
      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e., more than once if times > 1). See the section on signals in the general documentation of this class.
      @@ -7230,7 +7230,7 @@
Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e., for $dim>2$, no level argument may be given.

      Note
      The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
      /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html 2024-12-27 18:25:14.768914692 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html 2024-12-27 18:25:14.768914692 +0000 @@ -2178,7 +2178,7 @@

      When vertices have been moved locally, for example using code like

      cell->vertex(0) = new_location;

      then this function can be used to update the location of vertices between MPI processes.

All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that the part of the information that has to be sent between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported by exactly one process, since multiple processes moving the same vertex differently would lead to undesirable results. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" the vertex.

      Note
It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells that are not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that are not at least located on ghost cells.
This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell are consistent with the corresponding locations of these cells on other processors. On the other hand, the locations of the vertices of artificial cells will in general be wrong, since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that is what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and, before refining or coarsening the mesh, apply the opposite offset and call this function again.
      @@ -2666,7 +2666,7 @@
      Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

      The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

      Note
This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e., more than once if times > 1). See the section on signals in the general documentation of this class.
      @@ -7199,7 +7199,7 @@
Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e., for $dim>2$, no level argument may be given.

      Note
      The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
      /usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html 2024-12-27 18:25:14.812914994 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html 2024-12-27 18:25:14.816915021 +0000 @@ -147,9 +147,9 @@
      Member DoFTools::map_dofs_to_support_points (const hp::MappingCollection< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof_handler, std::map< types::global_dof_index, Point< spacedim > > &support_points, const ComponentMask &mask={})
      Use the function that returns the std::map instead.
      Member FEEvaluationData< dim, Number, is_face >::get_normal_vector (const unsigned int q_point) const
Use normal_vector() instead.
      Member FEFaceEvaluation< dim, fe_degree, n_q_points_1d, n_components_, Number, VectorizedArrayType >::integrate_scatter (const bool integrate_values, const bool integrate_gradients, VectorType &output_vector)
Please use the integrate_scatter() function with the EvaluationFlags argument.
      Member FEInterfaceViews::Vector< dim, spacedim >::average_hessian (const unsigned int interface_dof_index, const unsigned int q_point) const
      Use the average_of_hessians() function instead.
      Member FEInterfaceViews::Vector< dim, spacedim >::jump_gradient (const unsigned int interface_dof_index, const unsigned int q_point) const
      @@ -157,7 +157,7 @@
      Member FEInterfaceViews::Vector< dim, spacedim >::jump_hessian (const unsigned int interface_dof_index, const unsigned int q_point) const
      Use the average_of_hessians() function instead.
      Member FEPointEvaluationBase< n_components_, dim, spacedim, Number >::real_point (const unsigned int point_index) const
Use the function quadrature_point() instead.
      Member FETools::Compositing::compute_nonzero_components (const FiniteElement< dim, spacedim > *fe1, const unsigned int N1, const FiniteElement< dim, spacedim > *fe2=nullptr, const unsigned int N2=0, const FiniteElement< dim, spacedim > *fe3=nullptr, const unsigned int N3=0, const FiniteElement< dim, spacedim > *fe4=nullptr, const unsigned int N4=0, const FiniteElement< dim, spacedim > *fe5=nullptr, const unsigned int N5=0, const bool do_tensor_product=true)
      Use the versions of this function that take a vector of elements or an initializer list as arguments.
      Member FETools::Compositing::compute_restriction_is_additive_flags (const FiniteElement< dim, spacedim > *fe1, const unsigned int N1, const FiniteElement< dim, spacedim > *fe2=nullptr, const unsigned int N2=0, const FiniteElement< dim, spacedim > *fe3=nullptr, const unsigned int N3=0, const FiniteElement< dim, spacedim > *fe4=nullptr, const unsigned int N4=0, const FiniteElement< dim, spacedim > *fe5=nullptr, const unsigned int N5=0)
      @@ -169,9 +169,9 @@
      Member FiniteElement< dim, spacedim >::get_face_data (const UpdateFlags update_flags, const Mapping< dim, spacedim > &mapping, const Quadrature< dim - 1 > &quadrature, internal::FEValuesImplementation::FiniteElementRelatedData< dim, spacedim > &output_data) const
      Use the version taking a hp::QCollection argument.
      Member GridTools::fix_up_distorted_child_cells (const typename Triangulation< dim, spacedim >::DistortedCellList &distorted_cells, Triangulation< dim, spacedim > &triangulation)
This function predates deal.II's use of manifolds and of cell-local transfinite interpolation to place new points, and is no longer necessary. See Manifolds::get_default_points_and_weights() for more information.
      Member GridTools::rotate (const double angle, const unsigned int axis, Triangulation< dim, 3 > &triangulation)
Use the alternative with the unit vector instead.
      Member identity
      Use std_cxx20::type_identity instead.
      Member LinearAlgebra::CUDAWrappers::Vector< Number >::import (const ReadWriteVector< Number > &V, VectorOperation::values operation, std::shared_ptr< const Utilities::MPI::CommunicationPatternBase > communication_pattern={})
      @@ -231,9 +231,9 @@
      Member parallel::fullydistributed::Triangulation< dim, spacedim >::load (const std::string &filename, const bool autopartition) override
      The autopartition parameter has been removed.
      Member ParameterHandler::ShortText
Use ShortPRM instead of ShortText.
      Member ParameterHandler::Text
Use PRM instead of Text.
      Member Particles::ParticleAccessor< dim, spacedim >::set_property_pool (PropertyPool< dim, spacedim > &property_pool)
      This function is only kept for backward compatibility and has no meaning any more. ParticleAccessors always use the property pool of the owning particle handler.
      Member Particles::ParticleHandler< dim, spacedim >::register_load_callback_function (const bool serialization)
      @@ -241,7 +241,7 @@
      Member Particles::ParticleHandler< dim, spacedim >::register_store_callback_function ()
      Please use prepare_for_coarsening_and_refinement() or prepare_for_serialization() instead. See there for further information about the purpose of this function.
      Class PathSearch
Use the std::filesystem facilities instead.
      Member PETScWrappers::SolverBiCG::SolverBiCG (SolverControl &cn, const MPI_Comm mpi_communicator, const AdditionalData &data=AdditionalData())
      Member PETScWrappers::SolverBicgstab::SolverBicgstab (SolverControl &cn, const MPI_Comm mpi_communicator, const AdditionalData &data=AdditionalData())
      @@ -277,7 +277,7 @@
      Member Physics::Transformations::Rotations::rotation_matrix_3d (const Point< 3, Number > &axis, const Number &angle)
      Use the variant with a Tensor as an axis.
      Member PolarManifold< dim, spacedim >::center
Use get_center() instead.
      Member QProjector< dim >::DataSetDescriptor::face (const ReferenceCell &reference_cell, const unsigned int face_no, const bool face_orientation, const bool face_flip, const bool face_rotation, const unsigned int n_quadrature_points)
      Use the version of this function which takes a combined_orientation argument instead.
      Member QProjector< dim >::DataSetDescriptor::face (const ReferenceCell &reference_cell, const unsigned int face_no, const bool face_orientation, const bool face_flip, const bool face_rotation, const hp::QCollection< dim - 1 > &quadrature)
      @@ -285,7 +285,7 @@
      Member QProjector< dim >::DataSetDescriptor::subface (const ReferenceCell &reference_cell, const unsigned int face_no, const unsigned int subface_no, const bool face_orientation, const bool face_flip, const bool face_rotation, const unsigned int n_quadrature_points, const internal::SubfaceCase< dim > ref_case=internal::SubfaceCase< dim >::case_isotropic)
      Use the version of this function which takes a combined_orientation argument instead.
      Member ReferenceCell::compute_orientation (const std::array< T, N > &vertices_0, const std::array< T, N > &vertices_1) const
Use get_combined_orientation() instead.
      Member ReferenceCell::permute_according_orientation (const std::array< T, N > &vertices, const unsigned int orientation) const
      Use permute_by_combined_orientation() instead.
      Class SLEPcWrappers::TransformationSpectrumFolding
      @@ -295,7 +295,7 @@
      Member SparsityTools::distribute_sparsity_pattern (BlockDynamicSparsityPattern &dsp, const std::vector< IndexSet > &owned_set_per_cpu, const MPI_Comm mpi_comm, const IndexSet &myrange)
      Use the distribute_sparsity_pattern() with a single index set for the present MPI process only.
      Member SphericalManifold< dim, spacedim >::center
Use get_center() instead.
      Member SymmetricTensor< rank_, dim, Number >::begin_raw ()
This function suggests that the elements of a SymmetricTensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this is so. As a consequence, this function is deprecated.
      Member SymmetricTensor< rank_, dim, Number >::begin_raw () const
      @@ -321,7 +321,7 @@
      Member Utilities::MPI::create_group (const MPI_Comm comm, const MPI_Group &group, const int tag, MPI_Comm *new_comm)
      Use MPI_Comm_create_group directly
      Member Utilities::MPI::RemotePointEvaluation< dim, spacedim >::RemotePointEvaluation (const double tolerance, const bool enforce_unique_mapping=false, const unsigned int rtree_level=0, const std::function< std::vector< bool >()> &marked_vertices={})
      Member XDMFEntry::get_xdmf_content (const unsigned int indent_level, const ReferenceCell &reference_cell) const
      Use the other function instead.
      Member XDMFEntry::XDMFEntry (const std::string &filename, const double time, const std::uint64_t nodes, const std::uint64_t cells, const unsigned int dim)
      /usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html 2024-12-27 18:25:14.848915241 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html 2024-12-27 18:25:14.848915241 +0000 @@ -198,7 +198,7 @@
One of the uses of DerivativeForm is to apply it as a linear transformation. This function returns $\nabla \mathbf F(\mathbf x) \; \Delta \mathbf x$, which approximates the change in $\mathbf F(\mathbf x)$ when $\mathbf x$ is changed by the amount $\Delta \mathbf x$:

\[
  \nabla \mathbf F(\mathbf x) \; \Delta \mathbf x
  \approx
  \mathbf F(\mathbf x + \Delta \mathbf x) - \mathbf F(\mathbf x).
\]
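A minimal sketch of this use (grad_F and Delta_x are assumed to have been computed elsewhere, e.g. as a mapping Jacobian and a reference-cell increment):

DerivativeForm<1, dim, spacedim> grad_F;  // e.g. a mapping Jacobian
Tensor<1, dim>                   Delta_x; // increment in reference coordinates

// Apply the derivative as a linear map:
const Tensor<1, spacedim> dF = apply_transformation(grad_F, Delta_x);
// dF approximates F(x + Delta_x) - F(x).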
@@ -332,7 +332,7 @@
      Similar to the previous apply_transformation(). In matrix notation, it computes $DF2 \, DF1^{T}$. Moreover, the result of this operation $\mathbf A$ can be interpreted as a metric tensor in ${\mathbb R}^\text{spacedim}$ which corresponds to the Euclidean metric tensor in ${\mathbb R}^\text{dim}$. For every pair of vectors $\mathbf u, \mathbf v \in {\mathbb R}^\text{spacedim}$, we have:

\[
  \mathbf u \cdot \mathbf A \mathbf v =
  \text{DF2}^{-1}(\mathbf u) \cdot \text{DF1}^{-1}(\mathbf v).
\]
/usr/share/doc/packages/dealii/doxygen/deal.II/fe__coupling__values_8h.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/fe__coupling__values_8h.html	2024-12-27 18:25:14.872915406 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/fe__coupling__values_8h.html	2024-12-27 18:25:14.876915433 +0000
@@ -171,11 +171,11 @@
Quadrature coupling options when assembling quadrature formulas for double integrals.

      When computing the approximation of double integrals of the form

\[
\int_{T_1} \int_{T_2} K(x_1, x_2) f(x_1) g(x_2) \, dT_1 \, dT_2,
\]

      where $T_1$ and $T_2$ are two arbitrary sets (cells, faces, edges, or any combination thereof), and $K$ is a (possibly singular) coupling kernel, one needs to combine quadrature formulas from two different FEValuesBase objects.

      This enum class provides a way to specify how the quadrature points and weights should be combined. In general, the two FEValuesBase objects provide different quadrature rules, and these can be interpreted in different ways, depending on the kernel function that is being integrated, and on how the two quadrature rules were constructed.

      This enum is used in the constructor of FECouplingValues to specify how to interpret and manipulate the quadrature points and weights of the two FEValuesBase objects.

      @@ -217,11 +217,11 @@

      DoF coupling options when assembling double integrals.

      When computing the approximation of double integrals of the form

\[
\int_{T_1} \int_{T_2} K(x_1, x_2) v_i(x_1) w_j(x_2) \, dT_1 \, dT_2,
\]

where $T_1$ and $T_2$ are two arbitrary sets (cells, faces, edges, or any combination thereof), and $K$ is a (possibly singular) coupling kernel, one may want to combine degrees of freedom from two different FEValuesBase objects (i.e., basis functions $v_i$ and $w_j$ in the example above).

      This enum class provides a way to specify how the degrees of freedom should be combined. There are two cases of interest:

      1. the two FEValuesBase objects refer to different DoFHandlers
2. the two FEValuesBase objects refer to the same DoFHandler
@@ -230,14 +230,14 @@

        In the first case, one usually treats the two sets of degrees of freedom as independent of each other, and the resulting matrix is generally rectangular.

        In the second case, one may choose to treat the two sets of degrees of freedom either as independent or to group them together. A similar approach is used in the FEInterfaceValues class, where the degrees of freedom of the two FEValuesBase objects are grouped together, in a contiguous way, so that the resulting basis functions are interpreted in the following way:

\[
\phi_{1,i}(x) = \begin{cases} v_i(x) & \text{ if } i \in [0,n_1) \\
0 & \text{ if } i \in [n_1, n_1+n_2) \end{cases},\quad
\phi_{2,i}(x) = \begin{cases} 0 & \text{ if } i \in [0,n_1) \\
w_{i-n_1}(x) & \text{ if } i \in [n_1, n_1+n_2) \end{cases},
\]

where $\phi_{1,i}$ is the first combined basis function with index $i$, and $n_1$ and $n_2$ are the numbers of local dofs on the first and second FEValuesBase objects.

        This enum is used in the constructor of FECouplingValues to specify how to interpret and manipulate the local dof indices of the two FEValuesBase objects.
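As an illustration only, a constructor call might look roughly like the sketch below. Apart from the enumerator independent, which appears later in this log, the constructor signature and the QuadratureCouplingType enumerator name are assumptions of ours, not verified API:

FECouplingValues<dim> coupling_values(
  fe_values_1,                              // first FEValuesBase object
  fe_values_2,                              // second FEValuesBase object
  DoFCouplingType::independent,             // rectangular dof coupling
  QuadratureCouplingType::tensor_product);  // assumed enumerator name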

/usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html 2024-12-27 18:25:15.444919333 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html 2024-12-27 18:25:15.448919360 +0000 @@ -137,7 +137,7 @@

where $q$ indicates the index of the quadrature point, $\hat{\bf x}_q$ its location on the reference cell, and $w_q$ its weight.

      In order to evaluate such an expression in an application code, we have to access three different kinds of objects: a quadrature object that describes locations $\hat{\bf x}_q$ and weights $w_q$ of quadrature points on the reference cell; a finite element object that describes the gradients $\hat\nabla \varphi_i(\hat{\bf x}_q)$ of shape functions on the unit cell; and a mapping object that provides the Jacobian as well as its determinant. Dealing with all these objects would be cumbersome and error prone.

      On the other hand, these three kinds of objects almost always appear together, and it is in fact very rare for deal.II application codes to do anything with quadrature, finite element, or mapping objects besides using them together. For this reason, deal.II uses the FEValues abstraction combining information on the shape functions, the geometry of the actual mesh cell and a quadrature rule on a reference cell. Upon construction it takes one object of each of the three mentioned categories. Later, it can be "re-initialized" for a concrete grid cell and then provides mapped quadrature points and weights, mapped shape function values and derivatives as well as some properties of the transformation from the reference cell to the actual mesh cell.
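A sketch of the standard usage idiom for this abstraction (the DoFHandler dof_handler is assumed to be set up elsewhere):

const MappingQ1<2> mapping;
const FE_Q<2>      fe(1);
const QGauss<2>    quadrature(fe.degree + 1);

// One object bundling mapping, finite element, and quadrature:
FEValues<2> fe_values(mapping, fe, quadrature,
                      update_values | update_gradients |
                      update_quadrature_points | update_JxW_values);

for (const auto &cell : dof_handler.active_cell_iterators())
  {
    fe_values.reinit(cell); // map the reference-cell data to this cell
    for (const unsigned int q : fe_values.quadrature_point_indices())
      {
        // e.g. fe_values.shape_grad(i, q) and fe_values.JxW(q) now refer
        // to the mapped quantities on the current cell.
      }
  }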

      /usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html 2024-12-27 18:25:15.476919552 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html 2024-12-27 18:25:15.480919580 +0000 @@ -116,7 +116,7 @@
      Enumerator
independent

      The FEValuesBase objects may have different dof indices, possibly indexing different DoFHandler objects, and we are interested in assembling a generally rectangular matrix, where there is no relationship between the two index spaces.

      /usr/share/doc/packages/dealii/doxygen/deal.II/fe__remote__evaluation_8h_source.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/fe__remote__evaluation_8h_source.html 2024-12-27 18:25:14.952915955 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/fe__remote__evaluation_8h_source.html 2024-12-27 18:25:14.960916010 +0000 @@ -949,7 +949,7 @@
      1085 const std::vector<
1086 std::pair<types::boundary_id, std::function<std::vector<bool>()>>>
      1087 &non_matching_faces_marked_vertices,
      1088 const unsigned int quad_no,
      1089 const unsigned int dof_no,
      @@ -1095,7 +1095,7 @@
      1229 const std::vector<
1230 std::pair<types::boundary_id, std::function<std::vector<bool>()>>>
      1231 &non_matching_faces_marked_vertices,
      1232 const unsigned int n_q_pnts_1D,
      1233 const unsigned int dof_no,
      @@ -1495,7 +1495,6 @@
      static const unsigned int invalid_unsigned_int
      Definition types.h:220
      unsigned int boundary_id
      Definition types.h:144
      std::vector< BoundingBox< boost::geometry::dimension< typename Rtree::indexable_type >::value > > extract_rtree_level(const Rtree &tree, const unsigned int level)
      RTree< typename LeafTypeIterator::value_type, IndexType, IndexableGetter > pack_rtree(const LeafTypeIterator &begin, const LeafTypeIterator &end)
      /usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html 2024-12-27 18:25:15.004916312 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html 2024-12-27 18:25:15.008916339 +0000 @@ -185,7 +185,7 @@

      The macro DEAL_II_CONSTEXPR expands to constexpr if the compiler supports enough constexpr features (such as loops). If the compiler does not then this macro expands to nothing.

      Functions declared as constexpr can be evaluated at compile time. Hence code like

      constexpr double det_A = determinant(A);
      DEAL_II_HOST constexpr Number determinant(const SymmetricTensor< 2, dim, Number > &)
      assuming A is declared with the constexpr specifier, will typically result in compile-time constants. This example shows the performance gains of using constexpr because here we performed an operation with $O(\text{dim}^3)$ complexity during compile time, avoiding any runtime cost.
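A self-contained sketch, assuming a compiler/deal.II combination where these constexpr code paths are enabled:

// det_A is computed entirely at compile time; the O(dim^3) determinant
// evaluation leaves no runtime cost.
constexpr SymmetricTensor<2, 3> A     = unit_symmetric_tensor<3>();
constexpr double                det_A = determinant(A);
static_assert(det_A == 1., "evaluated at compile time");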

      Function Documentation

      ◆ new_task()

      /usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html 2024-12-27 18:25:15.028916476 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html 2024-12-27 18:25:15.032916504 +0000 @@ -185,7 +185,7 @@
      template <typename VectorType>
      virtual void Tstep(VectorType &u, const VectorType &v) const =0;
      };
      where these two member functions perform one step (or the transpose of such a step) of the smoothing scheme. In other words, the operations performed by these functions are $u = u - P^{-1} (A u - v)$ and $u = u - P^{-T} (A u - v)$.
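A minimal sketch of a class satisfying this interface, using damped Jacobi where $P^{-1}$ is $\omega D^{-1}$ (our own example, not a deal.II class):

class JacobiRelaxation
{
public:
  JacobiRelaxation(const SparseMatrix<double> &A, const double omega = 1.)
    : A(&A), omega(omega)
  {}

  // u <- u - omega D^{-1} (A u - v)
  void step(Vector<double> &u, const Vector<double> &v) const
  {
    Vector<double> r(u.size());
    A->vmult(r, u); // r = A u
    r -= v;         // r = A u - v
    for (unsigned int i = 0; i < u.size(); ++i)
      u(i) -= omega * r(i) / A->diag_element(i);
  }

  // The Jacobi preconditioner is symmetric, so P^{-T} = P^{-1}.
  void Tstep(Vector<double> &u, const Vector<double> &v) const
  {
    step(u, v);
  }

private:
  const SparseMatrix<double> *A;
  const double                omega;
};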

      SparsityPatternType
      /usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html 2024-12-27 18:25:15.408919086 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html 2024-12-27 18:25:15.416919140 +0000 @@ -337,7 +337,7 @@
      std::function<void(Domain &, const Range &)> Tvmult;
      std::function<void(Domain &, const Range &)> Tvmult_add;

Thus, such an object can be used in all iterative solver classes, either as a matrix or as a preconditioner.

The big advantage of the LinearOperator class is that it provides syntactic sugar for complex matrix-vector operations. As an example, consider the operation $(A+k\,B)\,C$, where $A$, $B$ and $C$ denote (possibly different) SparseMatrix objects. In order to construct a LinearOperator op that performs the above computation when applied to a vector, one can write:

#include <deal.II/lac/linear_operator_tools.h>
      double k;
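A sketch of how this example might continue (the operator names are ours; A, B, C and k are assumed initialized elsewhere):

const auto op_a = linear_operator(A);
const auto op_b = linear_operator(B);
const auto op_c = linear_operator(C);

// 'op' lazily represents (A + k B) C: applying it to a vector performs
// the composed matrix-vector products without forming the product matrix.
const auto op = (op_a + k * op_b) * op_c;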
      @@ -1387,7 +1387,7 @@
const LinearOperator< Range, Domain, Payload > &op
      Create a PackagedOperation object from a LinearOperator and a reference to a vector u of the Range space. The object stores the PackagedOperation $\text{op}^T \,u$ (in matrix notation). return (return_add) are implemented with Tvmult(__1,u) (Tvmult_add(__1,u)).

      The PackagedOperation object that is created stores a reference to u. Thus, the vector must remain a valid reference for the whole lifetime of the PackagedOperation object. All changes made on u after the creation of the PackagedOperation object are reflected by the operator object.

      Definition at line 703 of file packaged_operation.h.

      @@ -1411,7 +1411,7 @@
const PackagedOperation< Domain > &comp
      Composition of a PackagedOperation object with a LinearOperator. The object stores the computation $\text{op} \,comp$ (in matrix notation).

      Definition at line 730 of file packaged_operation.h.

      @@ -1434,7 +1434,7 @@
const LinearOperator< Range, Domain, Payload > &op
      Composition of a PackagedOperation object with a LinearOperator. The object stores the computation $\text{op}^T \,comp$ (in matrix notation).

      Definition at line 774 of file packaged_operation.h.

      @@ -1470,7 +1470,7 @@

Return a LinearOperator that performs the operations associated with the Schur complement. There are two additional helper functions, condense_schur_rhs() and postprocess_schur_solution(), that will likely be needed in order to perform any useful tasks in linear algebra with this operator.

      We construct the definition of the Schur complement in the following way:

      Consider a general system of linear equations that can be decomposed into two major sets of equations:

\begin{eqnarray*}
 \mathbf{K}\mathbf{d} = \mathbf{f}
 \quad \Rightarrow\quad
 \left(\begin{array}{cc}
    A & B \\ C & D
 \end{array}\right)
 \left(\begin{array}{c}
    x \\ y
 \end{array}\right)
 =
 \left(\begin{array}{c}
    f \\ g
 \end{array}\right),
\end{eqnarray*}

      where $ A,B,C,D $ represent general subblocks of the matrix $ \mathbf{K} $ and, similarly, general subvectors of $ \mathbf{d},\mathbf{f} $ are given by $ x,y,f,g $ .

      This is equivalent to the following two statements:

\begin{eqnarray*}
  (1) \quad Ax + By &=& f \\
  (2) \quad Cx + Dy &=& g .
\end{eqnarray*}

Assuming that $ A,D $ are both square and invertible, we could then perform one of two possible substitutions,

\begin{eqnarray*}
  (3) \quad x &=& A^{-1}(f - By) \quad \text{from} \quad (1) \\
  (4) \quad y &=& D^{-1}(g - Cx) \quad \text{from} \quad (2) ,
\end{eqnarray*}

      which amount to performing block Gaussian elimination on this system of equations.

      For the purpose of the current implementation, we choose to substitute (3) into (2)

\begin{eqnarray*}
  C \: A^{-1}(f - By) + Dy &=& g \\
  -C \: A^{-1} \: By + Dy &=& g - C \: A^{-1} \: f .
\end{eqnarray*}

      This leads to the result

\[
  (5) \quad (D - C\: A^{-1} \:B)\,y = g - C \: A^{-1} f
      \quad \Rightarrow \quad Sy = g'
\]

with $ S = (D - C\: A^{-1} \:B) $ being the Schur complement and the modified right-hand side vector $ g' = g - C \: A^{-1} f $ arising from the condensation step. Note that for this choice of $ S $, submatrix $ D $ need not be invertible and may thus be the null matrix. Ideally $ A $ should be well-conditioned.

So for any arbitrary vector $ a $, the Schur complement performs the following operation:

\[
  (6) \quad Sa = (D - C \: A^{-1} \: B)\,a
\]

A typical set of steps needed to solve a linear system (1),(2) would be (a compact sketch follows the list):

      1. Define the inverse matrix A_inv (using inverse_operator()).
      2. -
      3. Define the Schur complement $ S $ (using schur_complement()).
      4. -
      5. Define iterative inverse matrix $ S^{-1} $ such that (6) holds. It is necessary to use a solver with a preconditioner to compute the approximate inverse operation of $ S $ since we never compute $ S $ directly, but rather the result of its operation. To achieve this, one may again use the inverse_operator() in conjunction with the Schur complement that we've just constructed. Observe that the both $ S $ and its preconditioner operate over the same space as $ D $.
      6. +
      7. Define the Schur complement $ S $ (using schur_complement()).
      8. +
      9. Define iterative inverse matrix $ S^{-1} $ such that (6) holds. It is necessary to use a solver with a preconditioner to compute the approximate inverse operation of $ S $ since we never compute $ S $ directly, but rather the result of its operation. To achieve this, one may again use the inverse_operator() in conjunction with the Schur complement that we've just constructed. Observe that the both $ S $ and its preconditioner operate over the same space as $ D $.
      10. Perform pre-processing step on the RHS of (5) using condense_schur_rhs():

        -\[
+<picture><source srcset=\[
      g' = g - C \: A^{-1} \: f
-   \] + \]" src="form_1958.png"/>

      11. -
      12. Solve for $ y $ in (5):

        -\[
+<li>Solve for <picture><source srcset=$ y $ in (5):

        +\[
      y =  S^{-1} g'
-   \] + \]" src="form_1960.png"/>

      13. Perform the post-processing step from (3) using postprocess_schur_solution():

        -\[
+<picture><source srcset=\[
      x =  A^{-1} (f - By)
-   \] + \]" src="form_1961.png"/>
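      Expressed in code, these steps might look as follows. This is a minimal sketch assuming that A, B, C, D have already been wrapped as LinearOperators (e.g., via linear_operator()), that f, g, x, y are subvectors of compatible sizes, and that solver_A, preconditioner_A, solver_S, and preconditioner_S are user-chosen objects (the names are illustrative, not part of the interface):

      // Step 1: iterative inverse of A
      const auto A_inv = inverse_operator(A, solver_A, preconditioner_A);
      // Step 2: Schur complement S = D - C A^{-1} B
      const auto S = schur_complement(A_inv, B, C, D);
      // Step 3: iterative inverse of S; its preconditioner acts on the same space as D
      const auto S_inv = inverse_operator(S, solver_S, preconditioner_S);
      // Step 4: condensed right-hand side g' = g - C A^{-1} f
      const auto rhs = condense_schur_rhs(A_inv, C, f, g);
      // Step 5: solve for y
      y = S_inv * rhs;
      // Step 6: recover x = A^{-1}(f - B y)
      x = postprocess_schur_solution(A_inv, B, y, f);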


      In the above example, the preconditioner for $ S $ was defined as the preconditioner for $ D $, which is valid since they operate on the same space. However, if $ D $ and $ S $ are too dissimilar, then this may lead to a large number of solver iterations as $ \text{prec}(D) $ is not a good approximation for $ S^{-1} $.

      A better preconditioner in such a case would be one that provides a more representative approximation for $ S^{-1} $. One approach is shown in step-22, where $ D $ is the null matrix and the preconditioner for $ S^{-1} $ is derived from the mass matrix over this space.

      From another viewpoint, a similar result can be achieved by first constructing an object that represents an approximation for $ S $ wherein the expensive operation, namely $ A^{-1} $, is approximated. Thereafter we construct the approximate inverse operator $ \tilde{S}^{-1} $ which is then used as the preconditioner for computing $ S^{-1} $.
      // Construction of approximate inverse of Schur complement
      const auto A_inv_approx = linear_operator(preconditioner_A);
      const auto S_approx = schur_complement(A_inv_approx,B,C,D);
      // Definition of approximate inverse of Schur complement
      // (a sketch of the lines elided here; solver_S_approx and
      //  preconditioner_S_approx are user-chosen)
      const auto S_inv_approx = inverse_operator(S_approx, solver_S_approx, preconditioner_S_approx);
      // Definition of the exact Schur complement and its inverse,
      // preconditioned by the approximate inverse
      const auto S = schur_complement(A_inv, B, C, D);
      const auto S_inv = inverse_operator(S, solver_S, S_inv_approx);
      // Solve for y
      y = S_inv * rhs;
      x = postprocess_schur_solution (A_inv,B,y,f);
      Note that due to the construction of S_inv_approx and subsequently S_inv, there are a pair of nested iterative solvers which could collectively consume a lot of resources. Therefore care should be taken in the choices leading to the construction of the iterative inverse_operators. One might consider the use of an IterationNumberControl (or a similar mechanism) to limit the number of inner solver iterations. This controls the accuracy of the approximate inverse operation $ \tilde{S}^{-1} $ which acts only as the preconditioner for $ S^{-1} $. Furthermore, the preconditioner to $ \tilde{S}^{-1} $, which in this example is $ \text{prec}(D) $, should ideally be computationally inexpensive.

      However, if an iterative solver based on IterationNumberControl is used as a preconditioner then the preconditioning operation is not a linear operation. Here a flexible solver like SolverFGMRES (flexible GMRES) is best employed as an outer solver in order to deal with the variable behavior of the preconditioner. Otherwise the iterative solver can stagnate somewhere near the tolerance of the preconditioner or generally behave erratically. Alternatively, using a ReductionControl would ensure that the preconditioner always solves to the same tolerance, thereby rendering its behavior constant.
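      As an illustration of this combination, the following sketch pairs an inner solver limited by an IterationNumberControl with a flexible outer solver; all solver settings and the names S_approx, S, preconditioner_S_approx are placeholders carried over from the example above:

      // Inner solver: a fixed number of iterations, used only inside the preconditioner
      IterationNumberControl inner_control(30, 1.e-12);
      SolverCG<Vector<double>> inner_solver(inner_control);
      const auto S_inv_approx = inverse_operator(S_approx, inner_solver, preconditioner_S_approx);

      // Outer solver: flexible GMRES copes with the variable preconditioner
      SolverControl outer_control(1000, 1.e-10);
      SolverFGMRES<Vector<double>> outer_solver(outer_control);
      const auto S_inv = inverse_operator(S, outer_solver, S_inv_approx);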

      Further examples of this functionality can be found in the test-suite, such as tests/lac/schur_complement_01.cc. The solution of a multi-component problem (namely step-22) using the schur_complement can be found in tests/lac/schur_complement_03.cc.

      See also
      Block (linear algebra)
      condense_schur_rhs()

      For the system of equations

\begin{eqnarray*}
   Ax + By &=& f \\
   Cx + Dy &=& g \quad ,
\end{eqnarray*}

      this operation performs the pre-processing (condensation) step on the RHS subvector g so that the Schur complement can be used to solve this system of equations. More specifically, it produces an object that represents the condensed form of the subvector g, namely

\[
   g' = g - C \: A^{-1} \: f
\]
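      For example, with A_inv and C as constructed above, a call might look like this sketch (the result is a PackagedOperation that can be assigned to a vector):

      const auto rhs = condense_schur_rhs(A_inv, C, f, g);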

      See also
      Block (linear algebra)
      postprocess_schur_solution()

      For the system of equations

\begin{eqnarray*}
   Ax + By &=& f \\
   Cx + Dy &=& g \quad ,
\end{eqnarray*}

      this operation performs the post-processing step of the Schur complement to solve for the second subvector x once subvector y is known, with the result that

\[
   x =  A^{-1}(f - By)
\]

      See also
      Block (linear algebra)
      namespace Differentiation::SD

      Detailed Description

      A group dedicated to the implementation of functions and classes that relate to automatic and symbolic differentiation.

      Below we provide a very brief introduction as to what automatic and symbolic differentiation are, what variations of these computational/numerical schemes exist, and how they are integrated within deal.II's framework. The purpose of all of these schemes is to automatically compute the derivative of functions, or approximations of it, in cases where one does not want to compute them by hand. Common examples in the finite element context are situations where one wants to solve a nonlinear problem that is given by requiring that some residual $F(u,\nabla u)=0$ where $F$ is a complicated function that needs to be differentiated to apply Newton's method; and situations where one is given a parameter dependent problem ${\cal A}(q,u,\nabla u) = f$ and wants to form derivatives with regards to the parameters $q$, for example to optimize an output functional with regards to $q$, or for a sensitivity analysis with regards to $q$. One should think of $q$ as design parameters: say, the width or shape of a wing, the stiffness coefficients of a material chosen to build an object, the power sent to a device, the chemical composition of the gases sent to a burner. In all of these cases, one should think of $F$ and $\cal A$ as complicated and cumbersome to differentiate – at least when doing it by hand. A relatively simple case of a nonlinear problem that already highlights the tedium of computing derivatives by hand is shown in step-15. However, in reality, one might, for example, think about problems such as chemically reactive flows where the fluid equations have coefficients such as the density and viscosity that depend strongly and nonlinearly on the chemical composition, temperature, and pressure of the fluid at each point; and where the chemical species react with each other based on reaction coefficients that also depend nonlinearly and in complicated ways on the chemical composition, temperature, and pressure. In many cases, the exact formulas for all of these coefficients can take several lines to write out, may include exponentials and (harmonic or geometric) averages of several nonlinear terms, and/or may contain table lookup of and interpolation between data points. Just getting these terms right is difficult enough; computing derivatives of these terms is impractical in most applications and, in reality, impossible to get right. Higher derivatives are even more impossible to do without computer aid. Automatic or symbolic differentiation is a way out of this: One only has to implement the function that computes these coefficients in terms of their inputs only once, and gets the (correct!) derivatives without further coding effort (though at a non-negligible computational cost either at run time, compile time, or both).


      Automatic differentiation

      Automatic differentiation (commonly also referred to as algorithmic differentiation), is a numerical method that can be used to "automatically" compute the first, and perhaps higher-order, derivatives of function(s) with respect to one or more input variables. Although this comes at a certain computational cost, the benefits to using such a tool may be significant. When used correctly the derivatives of often complicated functions can be computed to a very high accuracy. Although the exact accuracy achievable by these frameworks largely depends on their underlying mathematical formulation, some implementations compute with a precision on the order of machine accuracy. Note that this is different to classical numerical differentiation (using, for example, a finite difference approximation of a function by evaluating it at different points), which has an accuracy that depends on both the perturbation size as well as the chosen finite-difference scheme; the error of these methods is measurably larger than well-formulated automatic differentiation approaches.

    • forward-mode (or forward accumulation) auto-differentiation, and
    • reverse-mode (or reverse accumulation) auto-differentiation.
    • As a point of interest, the optimal Jacobian accumulation, which performs a minimal set of computations, lies somewhere between these two limiting cases. Its computation for a general composite function remains an open problem in graph theory.

      With the aid of the diagram below (it and some of the listed details courtesy of this Wikipedia article), let us think about the representation of the calculation of the function $f (\mathbf{x}) = \sin (x_{1}) + x_{1} x_{2}$ and its derivatives:

      [Figure: Forward mode automatic differentiation]
      [Figure: Reverse mode automatic differentiation]

      Specifically, we will briefly describe what forward and reverse auto-differentiation are. Note that in the diagram, along the edges of the graph in text are the directional derivative of function $w$ with respect to the $i$-th variable, represented by the notation $\dot{w} = \dfrac{d w}{d x_{i}}$. The specific computations used to render the function value and its directional derivatives for this example are tabulated in the source article. For a second illustrative example, we refer the interested reader to this article.

      Consider first that any composite function $f(x)$, here represented as having two independent variables, can be dissected into a composition of its elementary functions

\[
   f (\mathbf{x})
   = f_{0} \circ f_{1} \circ f_{2} \circ \ldots \circ f_{n} (\mathbf{x})
   \quad .
\]

      As was previously mentioned, if each of the primitive operations $f_{n}$ is smooth and differentiable, then the chain-rule can be universally employed to compute the total derivative of $f$, namely $\dfrac{d f(x)}{d \mathbf{x}}$. What distinguishes the "forward" from the "reverse" mode is how the chain-rule is evaluated, but ultimately both compute the total derivative

\[
   \dfrac{d f (\mathbf{x})}{d \mathbf{x}}
   = \dfrac{d f_{0}}{d f_{1}} \dfrac{d f_{1}}{d f_{2}} \dfrac{d f_{2}}{d f_{3}} \ldots \dfrac{d f_{n} (\mathbf{x})}{d \mathbf{x}}
   \quad .
\]

      In forward-mode, the chain-rule is computed naturally from the "inside out". The independent variables are therefore fixed, and each sub-function $f'_{i} \vert_{f'_{i+1}}$ is computed recursively and its result returned as inputs to the parent function. Encapsulating and fixing the order of operations using parentheses, this means that we compute

\[
   \dfrac{d f (\mathbf{x})}{d \mathbf{x}}
   = \dfrac{d f_{0}}{d f_{1}} \left( \dfrac{d f_{1}}{d f_{2}} \left(\dfrac{d f_{2}}{d f_{3}} \left(\ldots \left( \dfrac{d f_{n} (\mathbf{x})}{d \mathbf{x}} \right)\right)\right)\right)
   \quad .
\]

      The computational complexity of a forward-sweep is proportional to that of the input function. However, for each directional derivative that is to be computed one sweep of the computational graph is required.
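      To make the forward sweep concrete, here is a tiny self-contained C++ illustration for the example function $f (\mathbf{x}) = \sin (x_{1}) + x_{1} x_{2}$ used above. The hand-rolled Dual type is purely expository and is not part of deal.II:

      #include <cmath>
      #include <iostream>

      // A dual number carries a value together with the directional derivative
      // of that value with respect to one chosen independent variable.
      struct Dual
      {
        double val; // function value
        double dot; // derivative d(val)/d(x_i)
      };

      Dual operator+(const Dual a, const Dual b) { return {a.val + b.val, a.dot + b.dot}; }
      Dual operator*(const Dual a, const Dual b) { return {a.val * b.val, a.dot * b.val + a.val * b.dot}; }
      Dual sin(const Dual a) { return {std::sin(a.val), std::cos(a.val) * a.dot}; }

      int main()
      {
        // Seed the sweep for d/dx1: x1 carries derivative 1, x2 carries 0.
        const Dual x1{0.5, 1.0};
        const Dual x2{4.0, 0.0};
        const Dual f = sin(x1) + x1 * x2; // one forward sweep
        // f.dot now equals cos(x1) + x2; a second sweep seeded with (0,1)
        // would be needed to obtain df/dx2 = x1.
        std::cout << "f = " << f.val << ", df/dx1 = " << f.dot << '\n';
      }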

      In reverse-mode, the chain-rule is computed somewhat unnaturally from the "outside in". The values of the dependent variables first get computed and fixed, and then the preceding differential operations are evaluated and multiplied in succession with the previous results from left to right. Again, if we encapsulate and fix the order of operations using parentheses, this implies that the reverse calculation is performed by

\[
   \dfrac{d f (\mathbf{x})}{d \mathbf{x}}
   = \left( \left( \left( \left( \left( \dfrac{d f_{0}}{d f_{1}} \right) \dfrac{d f_{1}}{d f_{2}} \right) \dfrac{d f_{2}}{d f_{3}} \right) \ldots \right) \dfrac{d f_{n} (\mathbf{x})}{d \mathbf{x}} \right)
   \quad .
\]

      The intermediate values $\dfrac{d f_{i-1}}{d f_{i}}$ are known as adjoints, which must be computed and stored as the computational graph is traversed. However, for each dependent scalar function one sweep of the computational graph renders all directional derivatives at once.

      Overall, the efficiency of each mode is determined by the number of independent (input) variables and dependent (output) variables. If the outputs greatly exceed the inputs in number, then forward-mode can be shown to be more efficient than reverse-mode. The converse is true when the number of input variables greatly exceeds that of the output variables. This point may be used to help inform which number type is most suitable for which set of operations are to be performed using automatic differentiation. For example, in many applications for which second derivatives are to be computed it is appropriate to combine both reverse- and forward-modes. The former would then typically be used to calculate the first derivatives, and the latter the second derivatives.

      Supported automatic differentiation libraries


      Symbolic expressions and differentiation

      Symbolic differentiation is, in terms of its design and usage, quite different to automatic differentiation. Underlying any symbolic library is a computer algebra system (CAS) that implements a language and collection of algorithms to manipulate symbolic (or "string-like") expressions. This is most similar, from a philosophical point of view, to how algebraic operations would be performed by hand.

      To help better distinguish between symbolic differentiation and numerical methods like automatic differentiation, let's consider a very simple example. Suppose we have the function $f(x,y) = [2x+1]^{y}$, where $x$ and $y$ are variables that are independent of one another. By applying the chain-rule, the derivatives of this function are simply $\dfrac{d f(x,y)}{d x} = 2y[2x+1]^{y-1}$ and $\dfrac{d f(x,y)}{d y} = [2x+1]^{y} \ln(2x+1)$. These are exactly the results that you get from a CAS after defining the symbolic variables x and y, defining the symbolic expression f = pow(2x+1, y) and computing the derivatives diff(f, x) and diff(f, y). At this point there is no assumption of what x and y represent; they may later be interpreted as plain (scalar) numbers, complex numbers, or something else for which the power and natural logarithm functions are well defined. Obviously this means that there is also no assumption about which point to evaluate either the expression or its derivatives. One could readily take the expression for $\dfrac{d f(x, y)}{d x}$ and evaluate it at $x=1, y=2.5$ and then later, with no recomputation of the derivative expression itself, evaluate it at $x=3.25, y=-6$. In fact, the interpretation of any symbolic variable or expression, and the inter-dependencies between variables, may be defined or redefined at any point during their manipulation; this leads to a degree of flexibility in computations that cannot be matched by auto-differentiation. For example, one could perform the permanent substitution $g(x) = \dfrac{d f(x, y)}{d x} \vert_{y=1}$ and then recompute $g(x)$ for several different values of $x$. One could also post-factum express an interdependency between x and y, such as $y \rightarrow y(x) := 2x$. For such a case, this means that the initially computed derivatives $\dfrac{d f(x, y)}{d x} \rightarrow \dfrac{\partial f(x, y(x))}{\partial x} = 2y(x) [2x+1]^{y(x)-1} = 4x[2x+1]^{2x-1}$ and $\dfrac{d f(x, y)}{d y} \rightarrow \dfrac{\partial f(x, y(x))}{\partial y} = [2x+1]^{y(x)} \ln(2x+1) = [2x+1]^{2x} \ln(2x+1)$ truly represent partial derivatives rather than total derivatives. Of course, if such an inter-dependency was explicitly defined before the derivatives $\dfrac{d f(x, y(x))}{d x}$ and $\dfrac{d f(x, y(x))}{d y}$ are computed, then this could correspond to the total derivative (which is the only result that auto-differentiation is able to achieve for this example).
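      In deal.II this example might be written as follows; a sketch using the Differentiation::SD wrappers (the helpers make_symbol(), pow(), and Expression::differentiate() are assumed here and should be checked against the installed version):

      #include <deal.II/differentiation/sd.h>

      namespace SD = dealii::Differentiation::SD;

      // Independent symbolic variables
      const SD::Expression x = SD::make_symbol("x");
      const SD::Expression y = SD::make_symbol("y");

      // f(x,y) = (2x+1)^y as a symbolic expression
      const SD::Expression f = SD::pow(2 * x + 1, y);

      // Symbolic derivatives: 2y(2x+1)^{y-1} and (2x+1)^y ln(2x+1)
      const SD::Expression df_dx = f.differentiate(x);
      const SD::Expression df_dy = f.differentiate(y);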

      Due to the sophisticated CAS that forms the foundation of symbolic operations, the types of manipulations are not necessarily restricted to differentiation alone, but rather may span a spectrum of manipulations relevant to discrete differential calculus, topics in pure mathematics, and more. The documentation for the SymPy library gives plenty of examples that highlight what a fully-fledged CAS is capable of. Through the Differentiation::SD::Expression class, and the associated functions in the Differentiation::SD namespace, we provide a wrapper to the high-performance SymEngine symbolic manipulation library that has enriched operator overloading and a consistent interface that makes it easy and "natural" to use. In fact, this class can be used as a "drop-in" replacement for arithmetic types in many situations, transforming the operations from being numeric to symbolic in nature; this is made especially easy when classes are templated on the underlying number type. Being focused on numerical simulation of PDEs, the functionality of the CAS that is exposed within deal.II focuses on symbolic expression creation, manipulation, and differentiation.

      The convenience wrappers to SymEngine functionality are primarily focused on manipulations that solely involve dictionary-based (i.e., something reminiscent of "string-based") operations. Although SymEngine performs these operations in an efficient manner, they are still known to be computationally expensive, especially when the operations are performed on large expressions. It should therefore be expected that the performance of the parts of code that perform differentiation, symbolic substitution, etc., may be a limiting factor when using this in production code. deal.II therefore provides an interface to accelerate the evaluation of lengthy symbolic expression through the BatchOptimizer class (itself often leveraging functionality provided by SymEngine). In particular, the BatchOptimizer simultaneously optimizes a collection of symbolic expressions using methods such as common subexpression elimination (CSE), as well as by generating high performance code-paths to evaluate these expressions through the use of a custom-generated std::function or by compiling the expression using the LLVM JIT compiler. The usage of the Differentiation::SD::BatchOptimizer class is exemplified in step-71.

      As a final note, it is important to recognize the remaining major deficiencies in deal.II's current implementation of the interface to the supported symbolic library. The level of functionality currently implemented effectively limits the use of symbolic algebra to the traditional use case (i.e. scalar and tensor algebra, as might be useful to define constitutive relations or complex functions for application as boundary conditions or source terms). In fact, step-71 demonstrates how it can be used to implement challenging constitutive models. In the future we will also implement classes to assist in performing assembly operations in the same spirit as that which has been done in the Differentiation::AD namespace.

      /usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html differs (HTML document, ASCII text, with very long lines)
    • If you have boundary conditions that set a certain part of the solution's value, for example no normal flux, $\mathbf n \cdot \mathbf u=0$ (as happens in flow problems and is handled by the VectorTools::compute_no_normal_flux_constraints function) or prescribed tangential components, $\mathbf{n}\times\mathbf{u}= \mathbf{n}\times\mathbf{f}$ (as happens in electromagnetic problems and is handled by the VectorTools::project_boundary_values_curl_conforming function). For the former case, imagine for example that we are at a vertex where the normal vector has the form $\frac 1{\sqrt{14}} (1,2,3)^T$ and that the $x$-, $y$- and $z$-components of the flow field at this vertex are associated with degrees of freedom 12, 28, and 40. Then the no-normal-flux condition means that we need to have the condition $\frac 1{\sqrt{14}} (x_{12}+2x_{28}+3x_{40})=0$. The prescribed tangential component leads to similar constraints though there is often something on the right hand side.
    • If you have hanging node constraints, for example in a mesh like this:

      instead (see, for example, [Shephard1984]).

      Here, $A$ is a given (unconstrained) system matrix for which we only assume that we can apply it to a vector but can not necessarily access individual matrix entries. $b$ is the corresponding right hand side of a system of linear equations $A\,x=b$. The matrix $C$ describes the homogeneous part of the linear constraints stored in an AffineConstraints object and the vector $k$ is the vector of corresponding inhomogeneities. More precisely, the AffineConstraints::distribute() operation applied on a vector $x$ is the operation

\[
  x \leftarrow C\,x+k.
\]

    • Compute which entries of a matrix built on the given dof_handler may possibly be nonzero, and create a sparsity pattern object that represents these nonzero locations.

      This function computes the possible positions of non-zero entries in the global system matrix by simulating which entries one would write to during the actual assembly of a matrix. For this, the function assumes that each finite element basis function is non-zero on a cell only if its degree of freedom is associated with the interior, a face, an edge or a vertex of this cell. As a result, a matrix entry $A_{ij}$ that is computed from two basis functions $\varphi_i$ and $\varphi_j$ with (global) indices $i$ and $j$ (for example, using a bilinear form $A_{ij}=a(\varphi_i,\varphi_j)$) can be non-zero only if these shape functions correspond to degrees of freedom that are defined on at least one common cell. Therefore, this function just loops over all cells, figures out the global indices of all degrees of freedom, and presumes that all matrix entries that couple any of these indices will result in a nonzero matrix entry. These will then be added to the sparsity pattern. As this process of generating the sparsity pattern does not take into account the equation to be solved later on, the resulting sparsity pattern is symmetric.

      This algorithm makes no distinction between shape functions on each cell, i.e., it simply couples all degrees of freedom on a cell with all other degrees of freedom on a cell. This is often the case, and always a safe assumption. However, if you know something about the structure of your operator and that it does not couple certain shape functions with certain test functions, then you can get a sparser sparsity pattern by calling a variant of the current function described below that allows you to specify which vector components couple with which other vector components.

      The method described above lives on the assumption that coupling between degrees of freedom only happens if shape functions overlap on at least one cell. This is the case with most usual finite element formulations involving conforming elements. However, for formulations such as the Discontinuous Galerkin finite element method, the bilinear form contains terms on interfaces between cells that couple shape functions that live on one cell with shape functions that live on a neighboring cell. The current function would not see these couplings, and would consequently not allocate entries in the sparsity pattern. You would then get into trouble during matrix assembly because you try to write into matrix entries for which no space has been allocated in the sparsity pattern. This can be avoided by calling the DoFTools::make_flux_sparsity_pattern() function instead, which takes into account coupling between degrees of freedom on neighboring cells.

      There are other situations where bilinear forms contain non-local terms, for example in treating integral equations. These require different methods for building the sparsity patterns that depend on the exact formulation of the problem. You will have to do this yourself then.
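      As a usage sketch for the common conforming case (assuming an existing dof_handler and a SparsityPattern object named sparsity_pattern):

      DynamicSparsityPattern dsp(dof_handler.n_dofs());
      DoFTools::make_sparsity_pattern(dof_handler, dsp);
      // For DG-type formulations with face terms, use
      // DoFTools::make_flux_sparsity_pattern(dof_handler, dsp); instead.
      sparsity_pattern.copy_from(dsp);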

      If, for example, you want to solve the Stokes equations

\begin{align*}
  -\Delta \mathbf u + \nabla p &= 0,\\
  \text{div}\ u &= 0
\end{align*}

      in two space dimensions, using stable Q2/Q1 mixed elements (using the FESystem class), then you don't want all degrees of freedom to couple in each equation. More specifically, in the first equation, only $u_x$ and $p$ appear; in the second equation, only $u_y$ and $p$ appear; and in the third equation, only $u_x$ and $u_y$ appear. (Note that this discussion only talks about vector components of the solution variable and the different equation, and has nothing to do with degrees of freedom, or in fact with any kind of discretization.) We can describe this by the following pattern of "couplings":

\[
 \left[
 \begin{array}{ccc}
   1 & 0 & 1 \\
   0 & 1 & 1 \\
   1 & 1 & 0
 \end{array}
 \right]
\]

      LinearOperator< Range, Domain, Payload > distribute_constraints_linear_operator(const AffineConstraints< typename Range::value_type > &constraints, const LinearOperator< Range, Domain, Payload > &exemplar)

      and Id_c is the projection to the subspace consisting of all vector entries associated with constrained degrees of freedom.

      This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

\[
   (C^T A C + Id_c) x = C^T (b - A\,k)
\]

      with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

      A detailed explanation of this approach is given in the Constraints on degrees of freedom topic.
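      In code, solving the modified system might look like the following sketch (system_matrix, b, x, constraints, solver, and preconditioner are assumed to exist; the names are illustrative):

      // Modified operator (C^T A C + Id_c) and right-hand side C^T (b - A k)
      const auto A     = linear_operator(system_matrix);
      const auto A_mod = constrained_linear_operator(constraints, A);
      const auto b_mod = constrained_right_hand_side(constraints, A, b);

      // Solve the modified system, then apply x <- C x + k
      const auto A_mod_inv = inverse_operator(A_mod, solver, preconditioner);
      x = A_mod_inv * b_mod;
      constraints.distribute(x);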



      This function is an updated version of the project_boundary_values_curl_conforming function. The intention is to fix a problem when using the previous function in conjunction with non-rectangular geometries (i.e. elements with non-rectangular faces). The L2-projection method used has been taken from the paper "Electromagnetic scattering simulation using an H (curl) conforming hp-finite element method in three dimensions" by PD Ledger, K Morgan and O Hassan ( Int. J. Num. Meth. Fluids, Volume 53, Issue 8, pages 1267-1296).

      This function will compute constraints that correspond to Dirichlet boundary conditions of the form $\vec{n}\times\vec{E}=\vec{n}\times\vec{F}$ i.e. the tangential components of $\vec{E}$ and $\vec{F}$ shall coincide.

      Computing constraints

      To compute the constraints we use a projection method based upon the paper mentioned above. In 2d this is done in a single stage for the edge-based shape functions, regardless of the order of the finite element. In 3d this is done in two stages, edges first and then faces.

      For each cell, each edge, $e$, is projected by solving the linear system $Ax=b$ where $x$ is the vector of constraints on degrees of freedom on the edge and

      $A_{ij} = \int_{e} (\vec{s}_{i}\cdot\vec{t})(\vec{s}_{j}\cdot\vec{t}) dS$

      $b_{i} = \int_{e} (\vec{s}_{i}\cdot\vec{t})(\vec{F}\cdot\vec{t}) dS$

      with $\vec{s}_{i}$ the $i^{th}$ shape function and $\vec{t}$ the tangent vector.

      Once all edge constraints, $x$, have been computed, we may compute the face constraints in a similar fashion, taking into account the residuals from the edges.

      For each face on the cell, $f$, we solve the linear system $By=c$ where $y$ is the vector of constraints on degrees of freedom on the face and

      $B_{ij} = \int_{f} (\vec{n} \times \vec{s}_{i}) \cdot (\vec{n} \times \vec{s}_{j}) dS$

      $c_{i} = \int_{f} (\vec{n} \times \vec{r}) \cdot (\vec{n} \times \vec{s}_i) dS$

      and $\vec{r} = \vec{F} - \sum_{e \in f} \sum_{i \in e} x_{i}\vec{s}_i$, the edge residual.

      The resulting constraints are then given in the solutions $x$ and $y$.

      If the AffineConstraints constraints contained values or other constraints before, the new ones are added or the old ones overwritten, if a node of the boundary part to be used was already in the list of constraints. This is handled by using inhomogeneous constraints. Please note that when combining adaptive meshes and this kind of constraints, the Dirichlet conditions should be set first, and then completed by hanging node constraints, in order to make sure that the discretization remains consistent. See the discussion on conflicting constraints in the topic on Constraints on degrees of freedom.

      Arguments to this function

      This function is explicitly for use with FE_Nedelec elements, or with FESystem elements which contain FE_Nedelec elements. It will throw an exception if called with any other finite element. The user must ensure that FESystem elements are correctly set up when using this function, as this check is not possible in this case.

      The second argument of this function denotes the first vector component of the finite element which corresponds to the vector function that you wish to constrain. For example, if we are solving Maxwell's equations in 3d and have components $(E_x,E_y,E_z,B_x,B_y,B_z)$ and we want the boundary conditions $\vec{n}\times\vec{B}=\vec{n}\times\vec{f}$, then first_vector_component would be 3. The boundary_function must return 6 components in this example, with the first 3 corresponding to $\vec{E}$ and the second 3 corresponding to $\vec{B}$. Vectors are implicitly assumed to have exactly dim components that are ordered in the same way as we usually order the coordinate directions, i.e. $x$-, $y$-, and finally $z$-component.

      The parameter boundary_component corresponds to the number boundary_id of the face. numbers::internal_face_boundary_id is an illegal value, since it is reserved for interior faces.

      The last argument is used to compute the normal vector $\vec{n}$ at the boundary points.
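      Put together, a call might look like the following sketch (dof_handler, boundary_function, constraints, and mapping are assumed to exist; the function name matches the "updated version" described above, and the numeric arguments are illustrative):

      VectorTools::project_boundary_values_curl_conforming_l2(
        dof_handler,
        3,                 // first_vector_component, e.g. B in (E_x,E_y,E_z,B_x,B_y,B_z)
        boundary_function,
        0,                 // boundary_id of the faces to be constrained
        constraints,
        mapping);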

      See also
      Glossary entry on boundary indicators
      project_boundary_values_div_conforming()

      Compute constraints that correspond to boundary conditions of the form $\vec{n}^T\vec{u}=\vec{n}^T\vec{f}$, i.e. the normal components of the solution $u$ and a given $f$ shall coincide. The function $f$ is given by boundary_function and the resulting constraints are added to constraints for faces with boundary indicator boundary_component.

      This function is explicitly written for use with the FE_RaviartThomas elements. Thus it throws an exception if it is called with other finite elements.

      If the AffineConstraints object constraints contained values or other constraints before, the new ones are added or the old ones overwritten, if a node of the boundary part to be used was already in the list of constraints. This is handled by using inhomogeneous constraints. Please note that when combining adaptive meshes and this kind of constraints, the Dirichlet conditions should be set first, and then completed by hanging node constraints, in order to make sure that the discretization remains consistent. See the discussion on conflicting constraints in the topic on Constraints on degrees of freedom.

      The argument first_vector_component denotes the first vector component in the finite element that corresponds to the vector function $\vec{u}$ that you want to constrain. Vectors are implicitly assumed to have exactly dim components that are ordered in the same way as we usually order the coordinate directions, i.e., $x$-, $y$-, and finally $z$-component.

      The parameter boundary_component corresponds to the boundary_id of the faces where the boundary conditions are applied. numbers::internal_face_boundary_id is an illegal value, since it is reserved for interior faces. The mapping is used to compute the normal vector $\vec{n}$ at the boundary points.
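      A call might look like the following sketch (dof_handler, boundary_function, constraints, and mapping are assumed; the function name project_boundary_values_div_conforming is inferred from the FE_RaviartThomas description above, and the numeric arguments are illustrative):

      VectorTools::project_boundary_values_div_conforming(
        dof_handler,
        0,                 // first_vector_component of the vector field u
        boundary_function,
        0,                 // boundary_id of the faces to be constrained
        constraints,
        mapping);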

      Computing constraints

      To compute the constraints we use the interpolation operator proposed in Brezzi, Fortin (Mixed and Hybrid Finite Element Methods, Springer, 1991) on every face located at the boundary.

      See also
      Glossary entry on boundary indicators
      compute_nonzero_normal_flux_constraints()

      This function computes the constraints that correspond to boundary conditions of the form $\vec u \cdot \vec n=\vec u_\Gamma \cdot \vec n$, i.e., normal flux constraints where $\vec u$ is a vector-valued solution variable and $\vec u_\Gamma$ is a prescribed vector field whose normal component we want to be equal to the normal component of the solution. These conditions have exactly the form handled by the AffineConstraints class, in that they relate a linear combination of boundary degrees of freedom to a corresponding value (the inhomogeneity of the constraint). Consequently, the current function creates a list of constraints that are written into an AffineConstraints container. This object may already have some content, for example from hanging node constraints, that remains untouched. These constraints have to be applied to the linear system like any other such constraints, i.e., you have to condense the linear system with the constraints before solving, and you have to distribute the solution vector afterwards.

      This function treats a more general case than VectorTools::compute_no_normal_flux_constraints() (which can only handle the case where $\vec u_\Gamma \cdot \vec n = 0$, and is used in step-31 and step-32). However, because everything that would apply to that function also applies as a special case to the current function, the following discussion is relevant to both.

      Note
      This function doesn't make much sense in 1d, so it throws an exception if dim equals one.

      Arguments to this function

      The second argument of this function denotes the first vector component in the finite element that corresponds to the vector function that you want to constrain. For example, if we were solving a Stokes equation in 2d and the finite element had components $(u,v,p)$, then first_vector_component needs to be zero if you intend to constrain the vector $(u,v)^T \cdot \vec n = \vec u_\Gamma \cdot \vec n$. On the other hand, if we solved the Maxwell equations in 3d and the finite element has components $(E_x,E_y,E_z,B_x,B_y,B_z)$ and we want the boundary condition $\vec B\cdot \vec n=\vec B_\Gamma\cdot \vec n$, then first_vector_component would be 3. Vectors are implicitly assumed to have exactly dim components that are ordered in the same way as we usually order the coordinate directions, i.e. $x$-, $y$-, and finally $z$-component. The function assumes, but can't check, that the vector components in the range [first_vector_component,first_vector_component+dim) come from the same base finite element. For example, in the Stokes example above, it would not make sense to use a FESystem<dim>(FE_Q<dim>(2), 1, FE_Q<dim>(1), dim) (note that the first velocity vector component is a $Q_2$ element, whereas all the other ones are $Q_1$ elements) as there would be points on the boundary where the $x$-velocity is defined but no corresponding $y$- or $z$-velocities.

      The third argument denotes the set of boundary indicators on which the boundary condition is to be enforced. Note that, as explained below, this is one of the few functions where it makes a difference where we call the function multiple times with only one boundary indicator, or whether we call the function once with the whole set of boundary indicators at once.

Argument four (function_map) describes the boundary function $\vec u_\Gamma$ for each boundary id. The function function_map[id] is used on the part of the boundary with indicator id, taken from the set boundary_ids. Each function in function_map is expected to have dim components, which are used independently of first_vector_component.

The mapping argument is used to compute the location of points on the boundary at which the function needs to request the normal vector $\vec n$ from the Manifold description if use_manifold_for_normal is set. If this parameter is not set, the mapping is used for computing the normal. This is useful, e.g., in the case that the mapping describes a deformation (e.g., MappingQCache, MappingQEulerian, MappingFEField).
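
A call could then look like the following minimal sketch for the 2d Stokes case discussed above (the boundary id 1 and the BoundaryValues class are placeholders, not part of the interface):

std::set<types::boundary_id> boundary_ids = {1};
BoundaryValues<dim> u_gamma;  // some Function<dim> with dim components
std::map<types::boundary_id, const Function<dim> *> function_map;
function_map[1] = &u_gamma;

AffineConstraints<double> constraints;
VectorTools::compute_nonzero_normal_flux_constraints(dof_handler,
                                                     /*first_vector_component=*/0,
                                                     boundary_ids,
                                                     function_map,
                                                     constraints);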

      Note
      When combining adaptively refined meshes with hanging node constraints and boundary conditions like from the current function within one AffineConstraints object, the hanging node constraints should always be set first, and then the boundary conditions since boundary conditions are not set in the second operation on degrees of freedom that are already constrained. This makes sure that the discretization remains conforming as is needed. See the discussion on conflicting constraints in the topic on Constraints on degrees of freedom.
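
In code, the ordering described here amounts to something like the following sketch (dof_handler and boundary_ids are placeholders):

AffineConstraints<double> constraints;
DoFTools::make_hanging_node_constraints(dof_handler, constraints);
// only now add the boundary conditions; degrees of freedom that are
// already constrained as hanging nodes are skipped:
VectorTools::compute_no_normal_flux_constraints(dof_handler,
                                                /*first_vector_component=*/0,
                                                boundary_ids,
                                                constraints);
constraints.close();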

      Computing constraints in 2d

Computing these constraints requires some smarts. The main question revolves around what the normal vector is. Consider the following situation:

Here, we have two cells that use a bilinear mapping (i.e., MappingQ(1)). Consequently, for each of the cells, the normal vector is perpendicular to the straight edge. If the two edges at the top and right are meant to approximate a curved boundary (as indicated by the dashed line), then neither of the two computed normal vectors is equal to the exact normal vector (though they approximate it as the mesh is refined further). What is worse, if we constrain $\vec u \cdot \vec n= \vec u_\Gamma \cdot \vec n$ at the common vertex with the normal vector from both cells, then we constrain the vector $\vec u$ with respect to two linearly independent vectors; consequently, the constraint would be $\vec u=\vec u_\Gamma$ at this point (i.e. all components of the vector), which is not what we wanted.

To deal with this situation, the algorithm works in the following way: at each point where we want to constrain $\vec u$, we first collect all normal vectors that adjacent cells might compute at this point. We then do not constrain $\vec u \cdot \vec n=\vec u_\Gamma \cdot \vec n$ for each of these normal vectors but only for the average of the normal vectors. In the example above, we therefore record only a single constraint $\vec u \cdot \vec {\bar n}=\vec u_\Gamma \cdot \vec {\bar n}$, where $\vec {\bar n}$ is the average of the two indicated normal vectors.

      Unfortunately, this is not quite enough. Consider the situation here:

If again the top and right edges approximate a curved boundary, and the left boundary is a separate (for example straight) boundary, so that the exact boundary has indeed a corner at the top left vertex, then the above construction would not work: here, we indeed want the full constraint $\vec u=\vec u_\Gamma$ at this point (because the normal components with respect to both the left normal as well as the top normal vector should be constrained), not that only the velocity in the direction of the average normal vector is constrained.

Consequently, we use the following heuristic to determine whether all normal vectors computed at one point are to be averaged: if two normal vectors for the same point are computed on different cells, then they are to be averaged. This covers the first example above. If they are computed from the same cell, then the fact that they are different is considered an indication that they come from different parts of the boundary that might be joined by a real corner, and must not be averaged.

There is one problem with this scheme. If, for example, the same domain we have considered above is discretized with the following mesh, then we get into trouble:

Here, the algorithm assumes that the boundary does not have a corner at the point where faces $F1$ and $F2$ join because at that point there are two different normal vectors computed from different cells. If you intend for there to be a corner of the exact boundary at this point, the only way to deal with this is to assign the two parts of the boundary different boundary indicators and call this function twice, once for each boundary indicator; doing so will yield only one normal vector at this point per invocation (because we consider only one boundary part at a time), with the result that the normal vectors will not be averaged. This situation also needs to be taken into account when using this function around reentrant corners on Cartesian meshes. If normal-flux boundary conditions are to be enforced on non-Cartesian meshes around reentrant corners, one may even get cycles in the constraints as one will in general constrain different components from the two sides. In that case, set a no-slip constraint on the reentrant vertex first.

      Computing constraints in 3d

      The situation is more complicated in 3d. Consider the following case where we want to compute the constraints at the marked vertex:

This function does the same as the compute_nonzero_normal_flux_constraints() function (see there for more information), but for the simpler case of homogeneous normal-flux constraints, i.e., for imposing the condition $\vec u \cdot \vec n= 0$. This function is used in step-31 and step-32.

      To compute no normal-flux constraints for a specific multigrid level for the geometric multigrid method, see compute_no_normal_flux_constraints_on_level().

      See also
      Glossary entry on boundary indicators
Compute the constraints that correspond to boundary conditions of the form $\vec u \times \vec n=\vec u_\Gamma \times \vec n$, i.e., tangential flow constraints where $\vec u$ is a vector-valued solution variable and $\vec u_\Gamma$ is a prescribed vector field whose tangential component(s) we want to be equal to the tangential component(s) of the solution. This function constrains exactly those dim-1 vector-valued components that are left unconstrained by VectorTools::compute_no_normal_flux_constraints(), and leaves the one component unconstrained that is constrained by that function.

      Further reading

      A description of some of the techniques used in this function, along with a discussion of difficulties encountered with this kind of boundary conditions can be found in [Engelman1982] .

      See also
      Glossary entry on boundary indicators
      Id_c = project_to_constrained_linear_operator(constraints, linop);

      and Id_c is the projection to the subspace consisting of all vector entries associated with constrained degrees of freedom.

      This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

\[
   (C^T A C + Id_c) x = C^T (b - A\,k)
\]

hp::FECollection<dim> fe_collection;
for (unsigned int degree = 1; degree <= 4; ++degree)
  fe_collection.push_back (FE_Q<dim>(degree));

This way, one can add elements of polynomial degree 1 through 4 to the collection. It is not necessary to retain the added object: the collection makes a copy of it; it does not merely store a pointer to the given finite element object. This same observation also holds for the other collection classes.

      It is customary that within an hp-finite element program, one keeps collections of finite elements and quadrature formulas with the same number of elements, each element of the one collection matching the element in the other. This is not necessary, but it often makes coding a lot simpler. If a collection of mappings is used, the same holds for hp::MappingCollection objects as well.
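
Since the quadrature collection is customarily kept in lockstep with the finite element collection, one might extend the loop above as in the following sketch (the choice of a QGauss formula with degree+1 points is an assumption, mirroring common usage such as in step-27):

hp::QCollection<dim> quadrature_collection;
for (unsigned int degree = 1; degree <= 4; ++degree)
  quadrature_collection.push_back (QGauss<dim>(degree + 1));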

Whenever p-adaptivity is considered in an hp-finite element program, a hierarchy of finite elements needs to be established to determine succeeding finite elements for refinement and preceding ones for coarsening. Typically, this hierarchy considers how finite element spaces are nested: for example, a $Q_1$ element describes a sub-space of a $Q_2$ element, and so doing $p$-refinement usually means using a larger (more accurate) finite element space. In other words, the hierarchy of finite elements is built by considering whether some elements of the collection are sub- or super-spaces of others.

      By default, we assume that finite elements are stored in an ascending order based on their polynomial degree. If the order of elements differs, a corresponding hierarchy needs to be supplied to the collection via the hp::FECollection::set_hierarchy() member function.
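
As a sketch of what supplying such a hierarchy might look like, assume the elements were stored in descending polynomial degree; the callback signature follows what step-75 uses, but treat the details as an assumption:

fe_collection.set_hierarchy(
  /*next_index=*/
  [](const typename hp::FECollection<dim> &fes, const unsigned int index)
  {
    // refinement walks towards the front of the (descending) collection
    return (index > 0) ? index - 1 : index;
  },
  /*previous_index=*/
  [](const typename hp::FECollection<dim> &fes, const unsigned int index)
  {
    // coarsening walks towards the back
    return (index + 1 < fes.size()) ? index + 1 : index;
  });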

A class that implements a polynomial mapping $Q_p$ of degree $p$ on all cells. This class is completely equivalent to the MappingQ class and exists only for backward compatibility.

      Definition at line 692 of file mapping_q.h.


From the examples above, it is obvious that if we encounter a cell that cannot be added to the cells which have already been entered, we cannot usually point to a cell that is the culprit and that must be entered in a different orientation. Furthermore, even if we knew which cell, there might be a large number of cells that would then cease to fit into the grid and for which we would have to find a different orientation as well (in the second example above, if we rotated cell 1, then we would have to rotate the cells 1 through N-1 as well).

A brute force approach to this problem is the following: if cell N can't be added, then try to rotate cell N-1. If we can't rotate cell N-1 any more, then try to rotate cell N-2 and try to add cell N with all orientations of cell N-1. And so on. Algorithmically, we can visualize this by a tree structure, where node N has as many children as there are possible orientations of node N+1 (in two space dimensions, there are four orientations in which each cell can be constructed from its four vertices; for example, if the vertex indices are {0 1 3 2}, then the four possibilities would be {0, 1, 3, 2}, {1, 3, 2, 0}, {3, 2, 0, 1}, and {2, 0, 1, 3}). When adding one cell after the other, we traverse this tree in a depth-first (pre-order) fashion. When we encounter that one path from the root (cell 0) to a leaf (the last cell) is not allowed (i.e. that the orientations of the cells which are encoded in the path through the tree do not lead to a valid triangulation), we have to track back and try another path through the tree.

      In practice, of course, we do not follow each path to a final node and then find out whether a path leads to a valid triangulation, but rather use an inductive argument: if for all previously added cells the triangulation is a valid one, then we can find out whether a path through the tree can yield a valid triangulation by checking whether entering the present cell would introduce any faces that have a nonunique direction; if that is so, then we can stop following all paths below this point and track back immediately.
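
In pseudocode terms, the pruned depth-first search just described might look like the following sketch; the helpers n_cells() and introduces_nonunique_face_direction() are hypothetical stand-ins, not the actual deal.II implementation:

// returns true if cells n, n+1, ... can be added consistently
bool add_remaining_cells (const unsigned int n,
                          std::vector<unsigned int> &orientation)
{
  if (n == n_cells())
    return true;                          // all cells placed
  for (unsigned int o = 0; o < 4; ++o)    // four orientations in 2d
    {
      orientation[n] = o;
      // prune: descend only if no face direction becomes nonunique
      if (!introduces_nonunique_face_direction (n, orientation))
        if (add_remaining_cells (n + 1, orientation))
          return true;
    }
  return false;                           // track back to cell n-1
}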

Nevertheless, it is already obvious that the tree has $4^N$ leaves in two space dimensions, since each of the $N$ cells can be added in four orientations. Most of these nodes can be discarded rapidly, since firstly the orientation of the first cell is irrelevant, and secondly if we add one cell that has a neighbor that has already been added, then there are already only two possible orientations left, so the total number of checks we have to make until we find a valid way is significantly smaller than $4^N$. However, the algorithm is still exponential in time and linear in memory (we only have to store the information for the present path in the form of a stack of orientations of cells that have already been added).

      In fact, the two examples above show that the exponential estimate is not a pessimistic one: we indeed have to track back to one of the very first cells there to find a way to add all cells in a consistent fashion.

      This discouraging situation is greatly improved by the fact that we have an alternative algorithm for 2d that is always linear in runtime (discovered and implemented by Michael Anderson of TICAM, University of Texas, in 2003), and that for 3d we can find an algorithm that in practice is usually only roughly linear in time and memory. We will describe these algorithms in the following. A full description and theoretical analysis is given in [AABB17] .

      The 2d linear complexity algorithm

parallel::transform (x.begin(), x.end(),
                     y.begin(),
                     z.begin(),
                     [](const double x, const double y)
                     {
                       return x + y;
                     },
                     1000);

In this example, we used a lambda expression to construct, on the fly, a function object that takes two arguments and returns the sum of the two. This is exactly what we need when we want to add the individual elements of vectors $x$ and $y$ and write the sum of the two into the elements of $z$. The function object that we get here is completely known to the compiler, and when it expands the loop that results from parallel::transform, the result will be as if we had written the loop in its obvious form:

InputIterator1 in_1 = x.begin();
      InputIterator2 in_2 = y.begin();
      OutputIterator out = z.begin();
for (; in_1 != x.end(); ++in_1, ++in_2, ++out)
  *out = *in_1 + *in_2;

      Here, we call the vmult_on_subrange function on sub-ranges of at least 200 elements each, so that the initial setup cost can amortize.
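
The example this sentence refers to is cut off in the diff; a sketch of such a call, assuming a helper vmult_on_subrange(begin_row, end_row, dst, src) that does the actual work on one sub-range, could look like:

parallel::apply_to_subranges (0, n_rows(),
                              [this, &dst, &src](const size_type begin_row,
                                                 const size_type end_row)
                              {
                                vmult_on_subrange (begin_row, end_row, dst, src);
                              },
                              200);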

A related operation is when the loops over elements each produce a result that must then be accumulated (other reduction operations than addition of numbers would work as well). An example is to form the matrix norm $x^T M x$ (it really is only a norm if $M$ is positive definite, but let's assume for a moment that it is). A sequential implementation would look like this for sparse matrices:

double SparseMatrix::mat_norm (const Vector &x) const
      {
      const double *val_ptr = &values[0];
      const unsigned int *colnum_ptr = &colnums[0];
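
The sequential example breaks off at this point in the diff. Its parallel counterpart, as the surrounding text describes, accumulates partial results over sub-ranges; a sketch, assuming a hypothetical helper mat_norm_on_subrange(begin_row, end_row, x) that returns the partial sum over one row range, might read:

return parallel::accumulate_from_subranges<double> (
  [this, &x](const size_type begin_row, const size_type end_row)
  {
    return mat_norm_on_subrange (begin_row, end_row, x);
  },
  0, n_rows(), 200);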

•   The last issue that is worth addressing is that, the way we wrote the MyClass::assemble_on_one_cell function above, we create and destroy an FEValues object every time the function is called, i.e. once for each cell in the triangulation. That's an immensely expensive operation because the FEValues class tries to do a lot of work in its constructor in an attempt to reduce the number of operations we have to do on each cell (i.e. it increases the constant in the ${\cal O}(1)$ effort to initialize such an object in order to reduce the constant in the ${\cal O}(N)$ operations to call FEValues::reinit on the $N$ cells of a triangulation). Creating and destroying an FEValues object on each cell invalidates this effort.

      The way to avoid this is to put the FEValues object into a second structure that will hold scratch data, and initialize it in the constructor:

      struct PerTaskData {
      FullMatrix<double> cell_matrix;
      Vector<double> cell_rhs;
};
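
The diff cuts off before the scratch structure itself; a minimal sketch of what the text describes, holding the FEValues object and initializing it in the constructor (the exact members are an assumption along the lines of the WorkStream documentation):

struct ScratchData {
  FEValues<dim> fe_values;

  ScratchData (const FiniteElement<dim> &fe,
               const Quadrature<dim>    &quadrature,
               const UpdateFlags         update_flags)
    : fe_values (fe, quadrature, update_flags)  // expensive setup happens once
  {}
};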

The resulting solution vector $U$ indeed has four components. We note that we could change the ordering of the solution components $\textbf u$ and $p$ inside $U$ if we also change columns of the matrix operator.

Next, we need to think about test functions $V$. We want to multiply both sides of the equation with them, then integrate over $\Omega$. The result should be a scalar equality. We can achieve this by choosing $V$ also vector valued as

\begin{eqnarray*}
   V =
   \left(
     \begin{array}{c} \textbf v \\ q \end{array}
   \right).
\end{eqnarray*}

•   These views can then be asked for information about these individual components. For example, when you write fe_values[pressure].value(i,q) you get the value of the pressure component of the $i$th shape function $V_i$ at the $q$th quadrature point. Because the extractor pressure represents a scalar component, the result of the call fe_values[pressure].value(i,q) is a scalar number. On the other hand, the call fe_values[velocities].value(i,q) would produce the value of a whole set of dim components, which would be of type Tensor<1,dim>.
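
To make this concrete, the extractors mentioned here are typically set up as in the following sketch (component numbers follow the $(u,v,p)$ ordering used above):

const FEValuesExtractors::Vector velocities (0);
const FEValuesExtractors::Scalar pressure (dim);

const Tensor<1,dim> u_i = fe_values[velocities].value (i, q);  // dim components
const double        p_i = fe_values[pressure].value (i, q);    // a scalar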

    • So if, again, this is not the code we use in step-8, what do we do there? The answer rests on the finite element we use. In step-8, we use the following element:

      FESystem<dim> finite_element (FE_Q<dim>(1), dim);
In other words, the finite element we use consists of dim copies of the same scalar element. This is what we call a primitive element: an element that may be vector-valued but where each shape function has exactly one non-zero component. In other words: if the $x$-component of a displacement shape function is nonzero, then the $y$- and $z$-components must be zero and similarly for the other components. What this means is that derived quantities based on shape functions also inherit this sparsity property. For example: the divergence $\mathrm{div}\ \Phi(x,y,z)=\partial_x\varphi_x(x,y,z) + \partial_y\varphi_y(x,y,z) + \partial_z\varphi_z(x,y,z)$ of a vector-valued shape function $\Phi(x,y,z)=(\varphi_x(x,y,z), \varphi_y(x,y,z), \varphi_z(x,y,z))^T$ is, in the present case, either $\mathrm{div}\ \Phi(x,y,z)=\partial_x\varphi_x(x,y,z)$, $\mathrm{div}\ \Phi(x,y,z)=\partial_y\varphi_y(x,y,z)$, or $\mathrm{div}\ \Phi(x,y,z)=\partial_z\varphi_z(x,y,z)$, because exactly one of the $\varphi_\ast$ is nonzero. Knowing this means that we can save a number of computations that, if we were to do them, would only yield zeros to add up.

      In a similar vein, if only one component of a shape function is nonzero, then only one row of its gradient $\nabla\Phi$ is nonzero. What this means for terms like $(\mu \nabla\Phi_i,\nabla\Phi_j)$, where the scalar product between two tensors is defined as $(\tau, \gamma)_\Omega=\int_\Omega \sum_{i,j=1}^d \tau_{ij} \gamma_{ij}$, is that the term is only nonzero if both tensors have their nonzero entries in the same row, which means that the two shape functions have to have their single nonzero component in the same location.

If we use this sort of knowledge, then we can in a first step avoid computing gradient tensors whenever we can determine up front that their scalar product will be zero, in a second step avoid building the entire tensors and only get their nonzero components, and in a final step simplify the scalar product by only considering that index $i$ for the one nonzero row, rather than multiplying and adding up zeros.

      The vehicle for all this is the ability to determine which vector component is going to be nonzero. This information is provided by the FiniteElement::system_to_component_index function. What can be done with it, using the example above, is explained in detail in step-8.
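
A sketch of how this information is typically used in an assembly loop, along the lines of what step-8 does (the actual bilinear-form contribution is elided):

for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
  {
    const unsigned int comp_i = fe.system_to_component_index(i).first;
    for (unsigned int j = 0; j < fe.n_dofs_per_cell(); ++j)
      {
        const unsigned int comp_j = fe.system_to_component_index(j).first;
        // for a term like (mu grad Phi_i, grad Phi_j), only shape functions
        // whose single nonzero component coincides can contribute:
        if (comp_i == comp_j)
          {
            // ... add the nonzero contribution to cell_matrix(i,j) ...
          }
      }
  }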

      Block solvers

Using techniques as shown above, it isn't particularly complicated to assemble the linear system, i.e. matrix and right hand side, for a vector-valued problem. However, then it also has to be solved. This is more complicated. Naively, one could just consider the matrix as a whole. For most problems, this matrix is not going to be definite (except for special cases like the elasticity equations covered in step-8 and step-17). It will often also not be symmetric. This rather general class of matrices presents problems for iterative solvers: the lack of structural properties prevents the use of the most efficient methods and preconditioners. While it can be done, the solution process will therefore most often be slower than necessary.

where $M$ represents the mass matrix that results from discretizing the identity operator $\mathbf 1$ and $B$ the equivalent of the gradient operator.

By default, this is not what happens, however. Rather, deal.II assigns numbers to degrees of freedom in a rather random manner. Consequently, if you form a vector out of the values of the degrees of freedom, it will not be neatly ordered like

\begin{eqnarray*}
   \left(
     \begin{array}{c} U \\ P \end{array}
   \right).
\end{eqnarray*}

Once the degrees of freedom have been reordered in this block-wise fashion, the system can be solved block by block; the second of the resulting equations, from which the velocity is recovered once the pressure is known, reads
\begin{eqnarray*}
   MU = F-BP.
\end{eqnarray*}

This has the advantage that the matrices $B^TM^{-1}B$ and $M$ that we have to solve with are both symmetric and positive definite, as opposed to the large whole matrix we had before.

How a solver like this is implemented is explained in more detail in step-20, step-31, and a few other tutorial programs. What we would like to point out here is that we now need a way to extract certain parts of a matrix or vector: if we are to multiply, say, the $U$ part of the solution vector by the $M$ part of the global matrix, then we need to have a way to access these parts of the whole.

This is where the BlockVector, BlockSparseMatrix, and similar classes come in. For all practical purposes, they can be used as regular vectors or sparse matrices, i.e. they offer element access, provide the usual vector operations and implement, for example, matrix-vector multiplications. In other words, assembling matrices and right hand sides works in exactly the same way as for the non-block versions. That said, internally they store the elements of vectors and matrices in "blocks"; for example, instead of using one large array, the BlockVector class stores it as a set of arrays each of which we call a block. The advantage is that, while the whole thing can be used as a vector, one can also access an individual block which then, again, is a vector with all the vector operations.

      To show how to do this, let us consider the second equation $MU=F-BP$ to be solved above. This can be achieved using the following sequence similar to what we have in step-20:

      Vector<double> tmp (solution.block(0).size());
      system_matrix.block(0,1).vmult (tmp, solution.block(1));
tmp *= -1;
tmp += system_rhs.block(0);
// ... then solve a linear system with the (0,0) block, i.e.
// system_matrix.block(0,0), and right hand side tmp, writing
// the result into solution.block(0).

What's happening here is that we allocate a temporary vector with as many elements as the first block of the solution vector, i.e. the velocity component $U$, has. We then set this temporary vector equal to the $(0,1)$ block of the matrix, i.e. $B$, times component 1 of the solution, which is the previously computed pressure $P$. The result is multiplied by $-1$, and component 0 of the right hand side, $F$, is added to it. The temporary vector now contains $F-BP$. The rest of the code snippet simply solves a linear system with $F-BP$ as right hand side and the $(0,0)$ block of the global matrix, i.e. $M$. Using block vectors and matrices in this way therefore allows us to quite easily write rather complicated solvers making use of the block structure of a linear system.

      Extracting data from solutions

      Once one has computed a solution, it is often necessary to evaluate it at quadrature points, for example to evaluate nonlinear residuals for the next Newton iteration, to evaluate the finite element residual for error estimators, or to compute the right hand side for the next time step in a time dependent problem.

The way this is done is to again use an FEValues object to evaluate the shape functions at quadrature points, and with those also the values of a finite element function. For the example of the mixed Laplace problem above, consider the following code after solving:

std::vector<Vector<double> > local_solution_values (n_q_points,
                                                    Vector<double> (dim+1));
fe_values.get_function_values (solution, local_solution_values);

    • DoFHandler: DoFHandler objects are the confluence of triangulations and finite elements: the finite element class describes how many degrees of freedom it needs per vertex, line, or cell, and the DoFHandler class allocates this space so that each vertex, line, or cell of the triangulation has the correct number of them. It also gives them a global numbering.

A different viewpoint is this: While the mesh and finite element describe abstract properties of the finite dimensional space $V_h$ in which we seek the discrete solution, the DoFHandler classes enumerate a concrete basis of this space so that we can represent the discrete solution as $u_h(\mathbf x)= \sum_j U_j \varphi_j(\mathbf x)$ by an ordered set of coefficients $U_j$.

Just as with triangulation objects, most operations on DoFHandlers are done by looping over all cells and doing something on each or a subset of them. The interfaces of the two classes are therefore rather similar: they allow one to get iterators to the first and last cell (or face, or line, etc.) and offer information through these iterators. The information that can be gotten from these iterators is the geometric and topological information that can already be gotten from the triangulation iterators (they are in fact derived classes) as well as things like the global numbers of the degrees of freedom on the present cell. One can also ask an iterator to extract the values corresponding to the degrees of freedom on the present cell from a data vector that stores values for all degrees of freedom associated with a triangulation.
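
For example, that last operation could look like the following sketch:

Vector<double> local_values (cell->get_fe().n_dofs_per_cell());
cell->get_dof_values (solution, local_values);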

      It is worth noting that, just as triangulations, DoFHandler classes do not know anything about the mapping from the unit cell to its individual cells. It is also ignorant of the shape functions that correspond to the degrees of freedom it manages: all it knows is that there are, for example, 2 degrees of freedom for each vertex and 4 per cell interior. Nothing about their specifics is relevant to the DoFHandler class with the exception of the fact that they exist.

      The DoFHandler class and its associates are described in the Degrees of Freedom topic. In addition, there are specialized versions that can handle multilevel and hp-discretizations. These are described in the Multilevel support and hp-finite element support topics. Finite element methods frequently imply constraints on degrees of freedom, such as for hanging nodes or nodes at which boundary conditions apply; dealing with such constraints is described in the Constraints on degrees of freedom topic.

•   Create and return an index set of size $N$ that contains every single index within this range. In essence, this function returns an index set created by

IndexSet is (N);
      is.add_range(0, N);

      This function exists so that one can create and initialize index sets that are complete in one step, or so one can write code like

if (my_index_set == complete_index_set(my_index_set.size()))

      Check if data on all children match, and return value of the first child.

\[
   d_{K_p} = d_{K_c}
   \qquad
   \forall K_c \text{ children of } K_p
\]


      Return sum of data on all children.

\[
   d_{K_p} = \sum d_{K_c}
   \qquad
   \forall K_c \text{ children of } K_p
\]

This strategy preserves the $l_1$-norm of the corresponding global data vector before and after adaptation.

Return the $l_2$-norm of data on all children.

\[
   d_{K_p}^2 = \sum d_{K_c}^2
   \qquad
   \forall K_c \text{ children of } K_p
\]

This strategy preserves the $l_2$-norm of the corresponding global data vector before and after adaptation.


      Return mean value of data on all children.

\[
   d_{K_p} = \sum d_{K_c} / n_\text{children}
   \qquad
   \forall K_c \text{ children of } K_p
\]


      Return maximum value of data on all children.

\[
   d_{K_p} = \max \left( d_{K_c} \right)
   \qquad
   \forall K_c \text{ children of } K_p
\]


      Return a vector containing copies of data of the parent cell for each child.

\[
   d_{K_c} = d_{K_p}
   \qquad
   \forall K_c \text{ children of } K_p
\]


      Return a vector which contains data of the parent cell being equally divided among all children.

\[
   d_{K_c} = d_{K_p} / n_\text{children}
   \qquad
   \forall K_c \text{ children of } K_p
\]

This strategy preserves the $l_1$-norm of the corresponding global data Vector before and after adaptation.


      Return a vector which contains squared data of the parent cell being equally divided among the squares of all children.

\[
   d_{K_c}^2 = d_{K_p}^2 / n_\text{children}
   \qquad
   \forall K_c \text{ children of } K_p
\]

This strategy preserves the $l_2$-norm of the corresponding global data Vector before and after adaptation.

The members of this enum are used to describe the logical interpretation of what the various components of a vector-valued data set mean. For example, if one has a finite element for the Stokes equations in 2d, representing components $(u,v,p)$, one would like to indicate that the first two, $u$ and $v$, represent a logical vector so that later on when we generate graphical output we can hand them off to a visualization program that will automatically know to render them as a vector field, rather than as two separate and independent scalar fields.

      By passing a set of enums of the current kind to the DataOut_DoFData::add_data_vector functions, this can be achieved.

      See the step-22 tutorial program for an example on how this information can be used in practice.
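
A sketch of how this typically looks in practice, following the pattern of step-22 (the names are illustrative):

std::vector<DataComponentInterpretation::DataComponentInterpretation>
  interpretation (dim, DataComponentInterpretation::component_is_part_of_vector);
interpretation.push_back (DataComponentInterpretation::component_is_scalar);

std::vector<std::string> names (dim, "velocity");
names.push_back ("pressure");

data_out.add_data_vector (solution, names,
                          DataOut<dim>::type_dof_data, interpretation);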


While this discussion applies to two spatial dimensions, it is more complicated in 3d. The reason is that we could still use patches, but it is difficult to visualize them: if we use a cut through the data (by, for example, using x- and z-coordinates, a fixed y-value, and plotting function values in z-direction), then the patched data is not a patch in the sense GNUPLOT wants it any more. Therefore, we use another approach, namely writing the data on the 3d grid as a sequence of lines, i.e. two points each associated with one or more data sets. There are therefore 12 lines for each subcell of a patch.

      Given the lines as described above, a cut through this data in Gnuplot can then be achieved like this:

set data style lines
splot [:][:][0:] "T" using 1:2:($3==.5 ? $4 : -1)
This command plots data in $x$- and $y$-direction unbounded, but in $z$-direction only those data points which are above the $x$-$y$-plane (we assume here a positive solution; if it has negative values, you might want to decrease the lower bound). Furthermore, it only takes the data points with z-values ($3) equal to 0.5, i.e. a cut through the domain at z=0.5. For the data points on this plane, the data values of the first data set ($4) are raised in z-direction above the x-y-plane; all other points are assigned the value -1 instead of the value of the data vector and are not plotted due to the lower bound in z plotting direction, given in the third pair of brackets.

More complex cuts are possible, including nonlinear ones. Note, however, that only those points which are actually on the cut-surface are plotted.

      Definition at line 3556 of file data_out_base.cc.


      Detailed Description

This namespace provides functions that compute a cell-wise approximation of the norm of a derivative of a finite element field by taking difference quotients between neighboring cells. This is a rather simple but efficient way to get an error indicator, since it can be computed with relatively little numerical effort and yet gives a reasonable approximation.

The way the difference quotients are computed on cell $K$ is the following (here described for the approximation of the gradient of a finite element field, but see below for higher derivatives): let $K'$ be a neighboring cell, and let $y_{K'}=x_{K'}-x_K$ be the distance vector between the centers of the two cells; then $ \frac{u_h(x_{K'}) - u_h(x_K)}{ \|y_{K'}\| }$ is an approximation of the directional derivative $ \nabla u(x_K) \cdot \frac{y_{K'}}{ \|y_{K'}\| }.$ By multiplying both terms by $\frac{y_{K'}}{ \|y_{K'}\| }$ from the left and summing over all neighbors $K'$, we obtain $ \sum_{K'} \left( \frac{y_{K'}}{ \|y_{K'}\|} \frac{y_{K'}^T}{ \|y_{K'}\| } \right) \nabla u(x_K) \approx \sum_{K'} \left( \frac{y_{K'}}{ \|y_{K'}\|} \frac{u_h(x_{K'}) - u_h(x_K)}{ \|y_{K'}\| } \right).$

Thus, if the matrix $ Y = \sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|} \frac{y_{K'}^T}{ \|y_{K'}\| } \right)$ is regular (which is the case when the vectors $y_{K'}$ to all neighbors span the whole space), we can obtain an approximation to the true gradient by $ \nabla u(x_K) \approx Y^{-1} \sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|} \frac{u_h(x_{K'}) - u_h(x_K)}{ \|y_{K'}\| } \right).$ This is a quantity that is easily computed. The value returned for each cell when calling the approximate_gradient function of this class is the $l_2$ norm of this approximation to the gradient. To make this a useful quantity, you may want to scale each element by the correct power of the respective cell size.

The computation of this quantity must fail if a cell has only neighbors for which the direction vectors $y_{K'}$ do not span the whole space, since then the matrix $Y$ is no longer invertible. If this happens, you will get an error similar to this one:

--------------------------------------------------------
An error occurred in line <749>
of file <source/numerics/derivative_approximation.cc> in function
void DerivativeApproximation::approximate(...)

As can easily be verified, this can only happen on very coarse grids, when some cells and all their neighbors have not been refined even once. You should therefore only call the functions of this class if all cells are refined at least once. In practice this is not much of a restriction.

      Approximation of higher derivatives

Approximations to higher derivatives can be computed along the same lines. For example, the tensor of second derivatives is approximated by the formula $ \nabla^2 u(x_K) \approx Y^{-1} \sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|} \otimes \frac{\nabla u_h(x_{K'}) - \nabla u_h(x_K)}{ \|y_{K'}\| } \right), $ where $\otimes$ denotes the outer product of two vectors. Note that unlike the true tensor of second derivatives, its approximation is not necessarily symmetric. This is due to the fact that in the derivation it is not clear whether we shall consider as projected second derivative the term $\nabla^2 u\, y_{KK'}$ or $y_{KK'}^T \nabla^2 u$. Depending on which choice we take, we obtain one approximation of the tensor of second derivatives or its transpose. To avoid this ambiguity, we take as result the symmetrized form, which is the mean value of the approximation and its transpose.

The returned value on each cell is the spectral norm of the approximated tensor of second derivatives, i.e. the largest eigenvalue by absolute value. This equals the largest curvature of the finite element field at each cell, and the spectral norm is the matrix norm associated to the $l_2$ vector norm.

      Derivatives even higher than the second can be obtained along the same lines as exposed above.

      Refinement indicators based on the derivatives

      If you would like to base a refinement criterion upon these approximations of the derivatives, you will have to scale the results of this class by an appropriate power of the mesh width. For example, since $\|u-u_h\|^2_{L_2} \le C h^2 \|\nabla u\|^2_{L_2}$, it might be the right thing to scale the indicators as $\eta_K = h \|\nabla u\|_K$, i.e. $\eta_K = h^{1+d/2} \|\nabla u\|_{\infty;K}$; the right power is thus $1+d/2$.

      Likewise, for the second derivative, one should choose a power of the mesh size $h$ one higher than for the gradient.
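
      As a sketch of this scaling (reusing the hypothetical gradient_indicator vector from the example above, with dim the space dimension), each cell's value can be multiplied by $h^{1+d/2}$, taking the cell diameter as $h$:

      for (const auto &cell : triangulation.active_cell_iterators())
        gradient_indicator(cell->active_cell_index()) *=
          std::pow(cell->diameter(), 1. + 1. * dim / 2.);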

      Implementation

      The formulae for the computation of approximations to the gradient and to the tensor of second derivatives shown above are very much alike. The basic difference is that in one case the finite difference quotient is a scalar, while in the other case it is a vector. For higher derivatives, this would be a tensor of even higher rank. We then have to form the outer product of this difference quotient with the distance vector $y_{KK'}$, symmetrize it, contract it with the matrix $Y^{-1}$ and compute its norm. To make the implementation simpler and to allow for code reuse, all these operations that are dependent on the actual order of the derivatives to be approximated, as well as the computation of the quantities entering the difference quotient, have been separated into auxiliary nested classes (named Gradient and SecondDerivative), and the main algorithm is simply passed one or the other data type and asks it to perform the order-dependent operations. The main framework that is independent of this, such as finding all active neighbors or setting up the matrix $Y$, is done in the main function approximate.

      Due to this way of operation, the class may be easily extended for higher order derivatives than are presently implemented. Basically, only an additional class along the lines of the derivative descriptor classes Gradient and SecondDerivative has to be implemented, with the respective alias and functions replaced by the appropriate analogues for the derivative that is to be approximated.

      Function Documentation

      @@ -281,7 +281,7 @@ const unsigned int component = 0

      This function is the analogue to the one above, computing finite difference approximations of the tensor of second derivatives. Pass it the DoF handler object that describes the finite element field, a nodal value vector, and receive the cell-wise spectral norm of the approximated tensor of second derivatives. The spectral norm is the matrix norm associated to the $l_2$ vector norm.

      The last parameter denotes the solution component for which the gradient is to be computed. It defaults to the first component. For scalar elements, this is the only valid choice; for vector-valued ones, any component between zero and the number of vector components can be given here.

      In a parallel computation the solution vector needs to contain the locally relevant unknowns.
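
      Analogously to the gradient case, a minimal sketch of a call (again with hypothetical triangulation, dof_handler, and solution objects) might read:

      Vector<float> second_derivative_indicator (triangulation.n_active_cells());
      DerivativeApproximation::approximate_second_derivative (dof_handler,
                                                              solution,
                                                              second_derivative_indicator);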

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html 2024-12-27 18:25:16.096923810 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html 2024-12-27 18:25:16.104923865 +0000 @@ -2520,7 +2520,7 @@

      Return an Expression representing a scalar symbolic variable with the identifier specified by symbol.

      For example, if the symbol is the string "x" then the scalar symbolic variable that is returned represents the scalar $x$.
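
      A minimal sketch, assuming deal.II was configured with SymEngine support (the arithmetic operators on Expression and the scalar overload of differentiate() are taken as given here):

      namespace SD = Differentiation::SD;

      const SD::Expression x  = SD::make_symbol("x");            // the scalar symbol "x"
      const SD::Expression f  = x * x + SD::Expression(2.0) * x; // f(x) = x^2 + 2x
      const SD::Expression df = SD::differentiate(f, x);         // symbolically 2x + 2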

      Parameters
      @@ -3660,7 +3660,7 @@

      Return a substitution map that has any explicit interdependencies between the entries of the input substitution_map resolved.

      The force_cyclic_dependency_resolution flag exists to ensure, if desired, that no cyclic dependencies can exist in the returned map. If a cyclic dependency exists in the input substitution map, substitution_map, then with this flag set to true the dependency cycle is broken by a dictionary-ordered substitution. For example, if the substitution map contains two entries map["a"] -> "b" and map["b"] -> "a", then the result of calling this function would be a map with the elements map["a"] -> "a" and map["b"] -> "a".

      If one symbol is an explicit function of another, and it is desired that all their values are completely resolved, then it may be necessary to perform substitution a number of times before the result is finalized. This function performs substitution sweeps for a set of symbolic variables until all explicit relationships between the symbols in the map have been resolved. Whether each entry returns a symbolic or real value depends on the nature of the values stored in the substitution map. If the values associated with a key are also symbolic then the returned result may still be symbolic in nature. The terminal result of using the input substitution map, substitution_map, is then guaranteed to be rendered by a single substitution of the returned dependency-resolved map.

      Example: If map["a"] -> 1 and map["b"] -> "a" + 2, then the function $f(a,b(a)) = a+b$ will be evaluated and the result $f\vert_{a=1,b=a+2} = 3+a$ is determined upon the completion of the first sweep. A second sweep is therefore necessary to resolve the final symbol, and the returned value is ultimately $f = [3+a]_{a=1} = 4$. By resolving the explicit relationships between all symbols in the map, we determine that map["a"] -> 1 and map["b"] -> 1 + 2 = 3 and thus, using only one substitution, that $f = a+b = 1 + 3 = 4$.

      @@ -3720,11 +3720,11 @@

      If the symbols stored in the map are explicitly dependent on one another, then the returned result depends on the order in which the map is traversed. It is recommended to first resolve all interdependencies in the map using the resolve_explicit_dependencies() function.

      Examples:

      1. If map["a"] == 1 and map["b"] == "a" + 2, then the function $f(a,b(a)) := a+b$ will be evaluated and the result $f\vert_{a=1,b=a+2} = 3+a$ is returned. This is because the symbol "a" is substituted throughout the function first, and only then is the symbol "b(a)" substituted, by which time its explicit dependency on "a" cannot be resolved.

      2. If map["a"] == "b"+2 and map["b"] == 1, then the function $f(a(b),b) := a+b$ will be evaluated and the result $f\vert_{a=b+2, b} = [b+2+b]_{b=1} = 4$ is returned. This is because the explicitly dependent symbol "a(b)" is substituted first, followed by the symbol "b".
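
      The difference between the two orders of resolution can be seen in a sketch like the following (same hedged assumptions as in the previous sketch):

      namespace SD = Differentiation::SD;

      const SD::Expression a = SD::make_symbol("a");
      const SD::Expression b = SD::make_symbol("b");
      const SD::Expression f = a + b;

      SD::types::substitution_map map;
      SD::add_to_substitution_map(map, a, SD::Expression(1.0));
      SD::add_to_substitution_map(map, b, a + SD::Expression(2.0));

      const SD::Expression partial = SD::substitute(f, map);  // 3 + a, as in case 1
      const SD::Expression full =
        SD::substitute(f, SD::resolve_explicit_dependencies(map));  // 4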
      @@ -3875,7 +3875,7 @@
      [in] symbol    An identifier (or name) for the returned symbolic variable.

      Return a vector of Expressions representing a vectorial symbolic variable with the identifier specified by symbol.

      For example, if the symbol is the string "v" then the vectorial symbolic variable that is returned represents the vector $v$. Each component of $v$ is prefixed by the given symbol, and has a suffix that indicates its component index.

      Template Parameters
      @@ -3950,7 +3950,7 @@
      dim    The dimension of the returned tensor.

      Return a symmetric tensor of Expressions representing a tensorial symbolic variable with the identifier specified by symbol.

      For example, if the symbol is the string "S" then the tensorial symbolic variable that is returned represents the tensor $S$. Each component of $S$ is prefixed by the given symbol, and has a suffix that indicates its component indices.
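
      A short sketch of creating such field variables (the factory names make_vector_of_symbols() and make_symmetric_tensor_of_symbols() are assumed from the surrounding documentation):

      namespace SD = Differentiation::SD;

      // A vector "v" with suffixed component names v_0, v_1, v_2:
      const Tensor<1, 3, SD::Expression> v = SD::make_vector_of_symbols<3>("v");

      // A symmetric rank-2 tensor "S", likewise with suffixed components:
      const SymmetricTensor<2, 3, SD::Expression> S =
        SD::make_symmetric_tensor_of_symbols<2, 3>("S");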

      Template Parameters
      @@ -4115,7 +4115,7 @@
      rank    The rank of the returned tensor.
      Returns
      The tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{T}}$.
      @@ -4144,7 +4144,7 @@
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{S}}$.
    @@ -4173,7 +4173,7 @@
    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{T}}$.
    @@ -4202,7 +4202,7 @@
    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{S}}$.
    @@ -4231,7 +4231,7 @@

    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{T}}{\partial x}$.
    @@ -4260,7 +4260,7 @@

    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{S}}{\partial x}$.
    @@ -4289,7 +4289,7 @@

    Returns
    The tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{T}}{\partial x}$.
    @@ -4318,7 +4318,7 @@

    Returns
    The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{S}}{\partial x}$.
    @@ -4347,8 +4347,8 @@

    Returns
    The tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{T}_{1}}{\partial \mathbf{T}_{2}}$.
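
    Putting the differentiation overloads above together, a sketch of computing $\frac{\partial f}{\partial \mathbf{S}}$ for a simple invariant-based scalar (same hedged API assumptions as in the earlier sketches) could read:

    namespace SD = Differentiation::SD;

    const SymmetricTensor<2, 3, SD::Expression> S =
      SD::make_symmetric_tensor_of_symbols<2, 3>("S");

    // f(S) = (tr S)^2, a simple scalar function of the tensor:
    const SD::Expression f = trace(S) * trace(S);

    // df/dS, returned as a symmetric tensor of expressions:
    const SymmetricTensor<2, 3, SD::Expression> df_dS = SD::differentiate(f, S);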
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html 2024-12-27 18:25:16.164924277 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html 2024-12-27 18:25:16.168924304 +0000 @@ -236,13 +236,13 @@

    Using the constraint information usually leads to reductions in bandwidth of 10 or 20 per cent, but may for some very unstructured grids also lead to an increase. You have to weigh the decrease in your case with the time spent to use the constraint information, which usually is several times longer than the ‘pure’ renumbering algorithm.

    In almost all cases, the renumbering scheme finds a corner to start with. Since there is more than one corner in most grids, and since even an interior degree of freedom may be a better starting point, having the user supply the starting point may be a viable approach if you have a simple scheme to derive a suitable point (e.g. by successively taking the third child of the cell top left of the coarsest level, taking its third vertex and the dof index thereof, if you want the top left corner vertex). If you do not know beforehand what your grid will look like (e.g. when using adaptive algorithms), searching for a best starting point may be difficult, however, and in many cases will not justify the effort.

    Component-wise and block-wise numberings

    For finite elements composed of several base elements using the FESystem class, or for elements which provide several components themselves, it may be of interest to sort the DoF indices by component. This will then bring out the block matrix structure, since otherwise the degrees of freedom are numbered cell-wise without taking into account that they may belong to different components. For example, one may want to sort degrees of freedom for a Stokes discretization so that we first get all velocities and then all the pressures, so that the resulting matrix naturally decomposes into a $2\times 2$ system.

    This kind of numbering may be obtained by calling the component_wise() function of this class. Since it does not touch the order of indices within each component, it may be worthwhile to first renumber using the Cuthill-McKee or a similar algorithm and afterwards renumbering component-wise. This will bring out the matrix structure and additionally have a good numbering within each block.

    The component_wise() function not only allows one to honor enumeration based on vector components, but also to group vector components together into "blocks" using a defaulted argument to the various DoFRenumbering::component_wise() functions (see GlossComponent vs GlossBlock for a description of the difference). The blocks designated through this argument may, but do not have to be, equal to the blocks that the finite element reports. For example, a typical Stokes element would be

    FESystem<dim> stokes_fe (FE_Q<dim>(2), dim,  // dim velocities
                             FE_Q<dim>(1), 1);   // one pressure
    This element has dim+1 vector components and equally many blocks. However, one may want to consider the velocities as one logical block so that all velocity degrees of freedom are enumerated the same way, independent of whether they are $x$- or $y$-velocities. This is done, for example, in step-20 and step-22 as well as several other tutorial programs.

    On the other hand, if you really want to use block structure reported by the finite element itself (a case that is often the case if you have finite elements that have multiple vector components, e.g. the FE_RaviartThomas or FE_Nedelec elements) then you can use the DoFRenumbering::block_wise instead of the DoFRenumbering::component_wise functions.

    Cell-wise numbering

    Given an ordered vector of cells, the function cell_wise() sorts the degrees of freedom such that degrees on earlier cells of this vector will occur before degrees on later cells.

    @@ -255,7 +255,7 @@

    The MatrixFree class provides optimized algorithms for interleaving operations on vectors before and after the access of the vector data in the respective loops. The algorithm matrix_free_data_locality() makes sure that all unknowns with a short distance between the first and last access are grouped together, in order to increase the spatial data locality.

    A comparison of reordering strategies

    As a benchmark of comparison, let us consider what the different sparsity patterns produced by the various algorithms look like when using the $Q_2^d\times Q_1$ element combination typically employed in the discretization of Stokes equations, when used on the mesh obtained in step-22 after one adaptive mesh refinement in 3d. The space dimension together with the coupled finite element leads to a rather dense system matrix with, on average, around 180 nonzero entries per row. After applying each of the reordering strategies shown below, the degrees of freedom are also sorted using DoFRenumbering::component_wise into velocity and pressure groups; this produces the $2\times 2$ block structure seen below with the large velocity-velocity block at top left, small pressure-pressure block at bottom right, and coupling blocks at top right and bottom left.

    The goal of reordering strategies is to improve the preconditioner. In step-22 we use a SparseILU as a preconditioner for the velocity-velocity block at the top left. The quality of the preconditioner can then be measured by the number of CG iterations required to solve a linear system with this block. For some of the reordering strategies below we record this number for adaptive refinement cycle 3, with 93176 degrees of freedom; because we solve several linear systems with the same matrix in the Schur complement, the average number of iterations is reported. The lower the number, the better the preconditioner and consequently the better the renumbering of degrees of freedom is suited for this task. We also state the run-time of the program, in part determined by the number of iterations needed, for the first 4 cycles on one of our machines. Note that the reported times correspond to the run time of the entire program, not just the affected solver; if a program runs twice as fast with one particular ordering than with another one, then this means that the actual solver is several times faster.

    @@ -467,7 +467,7 @@
    Sort the degrees of freedom by vector component. The numbering within each component is not touched, so a degree of freedom with index $i$, belonging to some component, and another degree of freedom with index $j$ belonging to the same component will be assigned new indices $n(i)$ and $n(j)$ with $n(i)<n(j)$ if $i<j$ and $n(i)>n(j)$ if $i>j$.

    You can specify that the components are ordered in a different way than suggested by the FESystem object you use. To this end, set up the vector target_component such that the entry at index i denotes the number of the target component for dofs with component i in the FESystem. Naming the same target component more than once is possible and results in a blocking of several components into one. This is discussed in step-22. If you omit this argument, the same order as given by the finite element is used.
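
    For the Stokes case mentioned earlier, grouping all dim velocities into one block and the pressure into another might look like the following sketch (dof_handler is a hypothetical DoFHandler set up with stokes_fe):

    std::vector<unsigned int> target_component(dim + 1, 0);  // velocities -> block 0
    target_component[dim] = 1;                               // pressure   -> block 1
    DoFRenumbering::component_wise(dof_handler, target_component);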

    If one of the base finite elements from which the global finite element under consideration here is composed is a non-primitive one, i.e. its shape functions have more than one non-zero component, then it is not possible to associate these degrees of freedom with a single vector component. In this case, they are associated with the first vector component to which they belong.

    For finite elements with only one component, or a single non-primitive base element, this function is the identity operation.

    @@ -561,7 +561,7 @@
    Sort the degrees of freedom by vector block. The numbering within each block is not touched, so a degree of freedom with index $i$, belonging to some block, and another degree of freedom with index $j$ belonging to the same block will be assigned new indices $n(i)$ and $n(j)$ with $n(i)<n(j)$ if $i<j$ and $n(i)>n(j)$ if $i>j$.

    Note
    This function only succeeds if each of the elements in the hp::FECollection attached to the DoFHandler argument has exactly the same number of blocks (see the glossary for more information). Note that this is not always given: while the hp::FECollection class ensures that all of its elements have the same number of vector components, they need not have the same number of blocks. At the same time, this function here needs to match individual blocks across elements and therefore requires that elements have the same number of blocks and that subsequent blocks in one element have the same meaning as in another element.

    Definition at line 997 of file dof_renumbering.cc.

    @@ -646,7 +646,7 @@
  • For meshes based on parallel::distributed::Triangulation, the locally owned cells of each MPI process are contiguous in Z order. That means that numbering degrees of freedom by visiting cells in Z order yields locally owned DoF indices that consist of contiguous ranges for each process. This is also true for the default ordering of DoFs on such triangulations, but the default ordering creates an enumeration that also depends on how many processors participate in the mesh, whereas the one generated by this function enumerates the degrees of freedom on a particular cell with indices that will be the same regardless of how many processes the mesh is split up between.
  • For meshes based on parallel::shared::Triangulation, the situation is more complex. Here, the set of locally owned cells is determined by a partitioning algorithm (selected by passing an object of type parallel::shared::Triangulation::Settings to the constructor of the triangulation), and in general these partitioning algorithms may assign cells to subdomains based on decisions that may have nothing to do with the Z order. (Though it is possible to select these flags in a way so that partitioning uses the Z order.) As a consequence, the cells of one subdomain are not contiguous in Z order, and if one renumbered degrees of freedom based on the Z order of cells, one would generally end up with DoF indices that on each processor do not form a contiguous range. This is often inconvenient (for example, because PETSc cannot store vectors and matrices for which the locally owned set of indices is not contiguous), and consequently this function uses the following algorithm for parallel::shared::Triangulation objects:

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html 2024-12-27 18:25:16.248924853 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html 2024-12-27 18:25:16.256924908 +0000 @@ -325,7 +325,7 @@

    Setting up sparsity patterns for boundary matrices

    In some cases, one wants to only work with DoFs that sit on the boundary. One application is, for example, if rather than interpolating non-homogeneous boundary values, one would like to project them. For this, we need two things: a way to identify nodes that are located on (parts of) the boundary, and a way to build matrices out of only degrees of freedom that are on the boundary (i.e. much smaller matrices, in which we do not even build the large zero block that stems from the fact that most degrees of freedom have no support on the boundary of the domain). The first of these tasks is done by the map_dof_to_boundary_indices() function (described above).

    The second part requires us first to build a sparsity pattern for the couplings between boundary nodes, and then to actually build the components of this matrix. While actually computing the entries of these small boundary matrices is discussed in the MatrixCreator namespace, the creation of the sparsity pattern is done by the create_boundary_sparsity_pattern() function. For its work, it needs to have a numbering of all those degrees of freedom that are on those parts of the boundary that we are interested in. You can get this from the map_dof_to_boundary_indices() function. It then builds the sparsity pattern corresponding to integrals like $\int_\Gamma \varphi_{b2d(i)} \varphi_{b2d(j)} dx$, where $i$ and $j$ are indices into the matrix, and $b2d(i)$ is the global DoF number of a degree of freedom sitting on a boundary (i.e., $b2d$ is the inverse of the mapping returned by the map_dof_to_boundary_indices() function).
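
    A sketch of the two steps just described (the sparsity-pattern function appears in current sources under the name DoFTools::make_boundary_sparsity_pattern(); that spelling, and the name dof_handler, are assumptions here):

    std::vector<types::global_dof_index> dof_to_boundary_mapping;
    DoFTools::map_dof_to_boundary_indices(dof_handler, dof_to_boundary_mapping);

    const types::global_dof_index n_boundary_dofs = dof_handler.n_boundary_dofs();
    DynamicSparsityPattern dsp(n_boundary_dofs, n_boundary_dofs);
    DoFTools::make_boundary_sparsity_pattern(dof_handler,
                                             dof_to_boundary_mapping,
                                             dsp);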

    DoF coupling between surface triangulations and bulk triangulations

    When working with Triangulation and DoFHandler objects of different co-dimension, such as a Triangulation<2,3>, describing (part of) the boundary of a Triangulation<3>, and their corresponding DoFHandler objects, one often needs to build a one-to-one matching between the degrees of freedom that live on the surface Triangulation and those that live on the boundary of the bulk Triangulation. The GridGenerator::extract_boundary_mesh() function returns a mapping of surface cell iterators to face iterators, that can be used by the function map_boundary_to_bulk_dof_iterators() to construct a map between cell iterators of the surface DoFHandler, and the corresponding pair of cell iterator and face index of the bulk DoFHandler. Such a map can be used to initialize FEValues and FEFaceValues for the corresponding DoFHandler objects. Notice that one must still ensure that the ordering of the quadrature points coincides in the two objects, in order to build a coupling matrix between the two systems.

    Enumeration Type Documentation

    @@ -500,7 +500,7 @@

    Here, combined_orientation is the relative orientation of face_1 with respect to face_2. This is typically computed by GridTools::orthogonal_equality().

    Optionally a matrix matrix along with a std::vector first_vector_components can be specified that describes how DoFs on face_1 should be modified prior to constraining to the DoFs of face_2. Here, two declarations are possible: If the std::vector first_vector_components is non-empty, the matrix is interpreted as a dim $\times$ dim rotation matrix that is applied to all vector valued blocks listed in first_vector_components of the FESystem. If first_vector_components is empty, the matrix is interpreted as an interpolation matrix with size no_face_dofs $\times$ no_face_dofs.

    This function makes sure that identity constraints don't create cycles in constraints.

    periodicity_factor can be used to implement Bloch periodic conditions (a.k.a. phase shift periodic conditions) of the form $\psi(\mathbf{r})=e^{-i\mathbf{k}\cdot\mathbf{r}}u(\mathbf{r})$ where $u$ is periodic with the same periodicity as the crystal lattice and $\mathbf{k}$ is the wavevector, see https://en.wikipedia.org/wiki/Bloch_wave. The solution at face_2 is equal to the solution at face_1 times periodicity_factor. For example, if the solution at face_1 is $\psi(0)$ and $\mathbf{d}$ is the corresponding point on face_2, then the solution at face_2 should be $\psi(d) = \psi(0)e^{-i \mathbf{k}\cdot \mathbf{d}}$. This condition can be implemented using $\mathrm{periodicity\_factor}=e^{-i \mathbf{k}\cdot \mathbf{d}}$.
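
    A small sketch of computing such a factor for a given wavevector and offset between matched points on the two faces (the numerical values here are placeholders; the result would be passed as the periodicity_factor argument):

    Tensor<1, 2> k;  // wavevector
    k[0] = 1.0;
    Tensor<1, 2> d;  // offset between matched points on face_1 and face_2
    d[0] = 1.0;

    const std::complex<double> periodicity_factor =
      std::exp(std::complex<double>(0.0, -(k * d)));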

    Detailed information can be found in the see Glossary entry on periodic boundary conditions.

    Definition at line 3591 of file dof_tools_constraints.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html 2024-12-27 18:25:16.276925046 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html 2024-12-27 18:25:16.280925073 +0000 @@ -178,7 +178,7 @@
    Linear regression least-square fit of $y = k \, x + b$. The two input vectors should have equal size, larger than one. The returned pair will contain $k$ (first) and $b$ (second).
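
    A quick sketch with synthetic data for which the fit is exact:

    const std::vector<double> x = {0., 1., 2., 3.};
    const std::vector<double> y = {1., 3., 5., 7.};  // y = 2 x + 1

    const std::pair<double, double> fit = FESeries::linear_regression(x, y);
    // fit.first == 2. (the slope k), fit.second == 1. (the offset b)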

    Definition at line 29 of file fe_series.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html 2024-12-27 18:25:16.328925403 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html 2024-12-27 18:25:16.328925403 +0000 @@ -395,17 +395,17 @@

    This is a rather specialized function used during the construction of finite element objects. It is used to build the basis of shape functions for an element, given a set of polynomials and interpolation points. The function is only implemented for finite elements with exactly dim vector components. In particular, this applies to classes derived from the FE_PolyTensor class.

    Specifically, the purpose of this function is as follows: FE_PolyTensor receives, from its derived classes, an argument that describes a polynomial space. This space may be parameterized in terms of monomials, or in some other way, but is in general not in the form that we use for finite elements where we typically want to use a basis that is derived from some kind of node functional (e.g., the interpolation at specific points). Concretely, assume that the basis used by the polynomial space is $\{\tilde\varphi_j(\mathbf x)\}_{j=1}^N$, and that the node functionals of the finite element are $\{\Psi_i\}_{i=1}^N$. We then want to compute a basis $\{\varphi_j(\mathbf x)\}_{j=1}^N$ for the finite element space so that $\Psi_i[\varphi_j] = \delta_{ij}$. To do this, we can set $\varphi_j(\mathbf x) = \sum_{k=1}^N c_{jk} \tilde\varphi_k(\mathbf x)$ where we need to determine the expansion coefficients $c_{jk}$. We do this by applying $\Psi_i$ to both sides of the equation, to obtain

    \begin{align*}
   \Psi_i [\varphi_j] = \sum_{k=1}^N c_{jk} \Psi_i[\tilde\varphi_k],
 \end{align*}

    and we know that the left hand side equals $\delta_{ij}$. If you think of this as a system of $N\times N$ equations for the elements of a matrix on the left and on the right, then this can be written as

    \begin{align*}
   I = C X^T
 \end{align*}

    where $C$ is the matrix of coefficients $c_{jk}$ and $X_{ik} = \Psi_i[\tilde\varphi_k]$. Consequently, in order to compute the expansion coefficients $C=X^{-T}$, we need to apply the node functionals to all functions of the "raw" basis of the polynomial space.

    Until the finite element receives this matrix $X$ back, it describes its shape functions (e.g., in FiniteElement::shape_value()) in the form $\tilde\varphi_j$. After it calls this function, it has the expansion coefficients and can describe its shape functions as $\varphi_j$.

    This function therefore computes this matrix $X$, for the following specific circumstances:

    The name of the function originates from the fact that it can be used to construct a representation of a function of higher polynomial degree on a once coarser mesh. For example, if you imagine that you start with a $Q_1$ function on a globally refined mesh, and that dof2 is associated with a $Q_2$ element, then this function computes the equivalent of the operator $I_{2h}^{(2)}$ interpolating the original piecewise linear function onto a quadratic function on a once coarser mesh with mesh size $2h$ (but representing this function on the original mesh with size $h$). If the exact solution is sufficiently smooth, then $u^\ast=I_{2h}^{(2)}u_h$ is typically a better approximation to the exact solution $u$ of the PDE than $u_h$ is. In other words, this function provides a postprocessing step that improves the solution in a similar way one often obtains by extrapolating a sequence of solutions, explaining the origin of the function's name.

    Note
    The resulting field does not satisfy continuity requirements of the given finite elements if the algorithm outlined above is used. When you use continuous elements on grids with hanging nodes, please use the extrapolate function with an additional AffineConstraints argument, see below.
    Since this function operates on patches of cells, it requires that the underlying grid is refined at least once for every coarse grid cell. If this is not the case, an exception will be raised.
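
    Given the first note above, a sketch of the constrained variant might look as follows (all object names are hypothetical: u1 is a $Q_1$ solution on dof_handler_q1, dof_handler_q2 uses a $Q_2$ element on the same mesh, and constraints_q2 holds its hanging-node constraints):

    Vector<double> u2(dof_handler_q2.n_dofs());
    FETools::extrapolate(dof_handler_q1, u1,
                         dof_handler_q2, constraints_q2, u2);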
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFiniteElementDomination.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFiniteElementDomination.html 2024-12-27 18:25:16.352925567 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFiniteElementDomination.html 2024-12-27 18:25:16.356925595 +0000 @@ -139,10 +139,10 @@
    An enum that describes the outcome of comparing two elements for mutual domination. If one element dominates another, then the restriction of the space described by the dominated element to a face of the cell is strictly larger than that of the dominating element. For example, in 2-d Q(2) elements dominate Q(4) elements, because the traces of Q(4) elements are quartic polynomials which is a space strictly larger than the quadratic polynomials (the restriction of the Q(2) element). Similar reasonings apply for vertices and cells as well. In general, Q(k) dominates Q(k') if $k\le k'$.

    This enum is used in the FiniteElement::compare_for_domination() function that is used in the context of hp-finite element methods when determining what to do at faces where two different finite elements meet (see the hp-paper for a more detailed description of the following). In that case, the degrees of freedom of one side need to be constrained to those on the other side. The determination which side is which is based on the outcome of a comparison for mutual domination: the dominated side is constrained to the dominating one.

    Note that there are situations where neither side dominates. The hp-paper lists two cases, with the simpler one being that a $Q_2\times Q_1$ vector-valued element (i.e. a FESystem(FE_Q(2),1,FE_Q(1),1)) meets a $Q_1\times Q_2$ element: here, for each of the two vector-components, we can define a domination relationship, but it is different for the two components.

    It is clear that the concept of domination doesn't matter for discontinuous elements. However, discontinuous elements may be part of vector-valued elements and may therefore be compared against each other for domination. They should return either_element_can_dominate in that case. Likewise, when comparing two identical finite elements, they should return this code; the reason is that we can not decide which element will dominate at the time we look at the first component of, for example, two $Q_2\times Q_1$ and $Q_2\times Q_2$ elements, and have to keep our options open until we get to the second base element.

    Finally, the code no_requirements exists for cases where elements impose no continuity requirements. The case is primarily meant for FE_Nothing which is an element that has no degrees of freedom in a subdomain. It could also be used by discontinuous elements, for example.

    More details on domination can be found in the hp-paper.

/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html 2024-12-27 18:25:16.376925732 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html 2024-12-27 18:25:16.380925760 +0000 @@ -143,12 +143,12 @@
    const unsigned int component = 0

    Estimate bounds on the value and bounds on each gradient component of a Function, $f$, over a BoundingBox, by approximating it by a 2nd order Taylor polynomial starting from the box center.

    Each lower and upper bound is returned as a std::pair<double, double>, such that the first entry is the lower bound, $L$, and the second is the upper bound, $U$, i.e. $f(x) \in [L, U]$.

    The function value, gradient, and Hessian are computed at the box center. The bounds on the value of the function are then estimated as

    $f(x) \in [f(x_c) - F, f(x_c) + F]$, where $F = \sum_i |\partial_i f(x_c)| h_i + 1/2 \sum_i \sum_j |\partial_i \partial_j f(x_c)| h_i h_j$.

    Here, $h_i$ is half the side length of the box in the $i$th coordinate direction, which is the distance we extrapolate. The bounds on the gradient components are estimated similarly as

    $\partial_i f \in [\partial_i f(x_c) - G_i, \partial_i f(x_c) + G_i]$, where $G_i = \sum_j |\partial_i \partial_j f(x_c)| h_j$.

    If the function has more than 1 component the component parameter can be used to specify which function component the bounds should be computed for.
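
    A sketch of estimating such bounds over the unit square for a library-provided function (the free function is assumed here to be FunctionTools::taylor_estimate_function_bounds(), matching the description above):

    Functions::CosineFunction<2> f;
    const BoundingBox<2> box(std::make_pair(Point<2>(0., 0.),
                                            Point<2>(1., 1.)));

    std::pair<double, double>                value_bounds;
    std::array<std::pair<double, double>, 2> gradient_bounds;
    FunctionTools::taylor_estimate_function_bounds<2>(f,
                                                      box,
                                                      value_bounds,
                                                      gradient_bounds);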

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html 2024-12-27 18:25:16.472926391 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html 2024-12-27 18:25:16.480926446 +0000 @@ -281,7 +281,7 @@
    const bool colorize = false

    Initialize the given triangulation with a hypercube (line in 1d, square in 2d, etc) consisting of exactly one cell. The hypercube volume is the tensor product interval $[left,right]^{\text{dim}}$ in the present number of dimensions, where the limits are given as arguments. They default to zero and unity, then producing the unit hypercube.

    +

    Initialize the given triangulation with a hypercube (line in 1d, square in 2d, etc) consisting of exactly one cell. The hypercube volume is the tensor product interval $[left,right]^{\text{dim}}$ in the present number of dimensions, where the limits are given as arguments. They default to zero and unity, then producing the unit hypercube.

    If the argument colorize is false, then all boundary indicators are set to zero (the default boundary indicator) for 2d and 3d. If it is true, the boundary is colorized as in hyper_rectangle(). In 1d the indicators are always colorized, see hyper_rectangle().
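
    The standard idiom, as used throughout the tutorial programs:

    Triangulation<2> triangulation;
    GridGenerator::hyper_cube(triangulation, 0., 1.);  // the unit square, one cell
    triangulation.refine_global(2);                    // 4^2 = 16 cells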

    @@ -725,7 +725,7 @@
    const bool colorize = false

    Generate a grid consisting of a channel with a cylinder. This is a common benchmark for Navier-Stokes solvers. The geometry consists of a channel of size $[0, 2.2] \times [0, 0.41] \times [0, 0.41] $ (where the $z$ dimension is omitted in 2d) with a cylinder, parallel to the $z$ axis with diameter $0.1$, centered at $(0.2, 0.2, 0)$. The channel has three distinct regions:

      +

      Generate a grid consisting of a channel with a cylinder. This is a common benchmark for Navier-Stokes solvers. The geometry consists of a channel of size $[0, 2.2] \times [0, 0.41] \times [0, 0.41] $ (where the $z$ dimension is omitted in 2d) with a cylinder, parallel to the $z$ axis with diameter $0.1$, centered at $(0.2, 0.2, 0)$. The channel has three distinct regions:

      1. If n_shells is greater than zero, then there are that many shells centered around the cylinder,
      2.

      @@ -749,10 +749,10 @@
        Parameters
        tria                 Triangulation to be created. Must be empty upon calling this function.
        shell_region_width   Width of the layer of shells around the cylinder. This value should be between $0$ and $0.05$; the default value is $0.03$.
        n_shells             Number of shells to use in the shell layer.
        skewness             Parameter controlling how close the shells are to the cylinder: see the mathematical definition given in GridGenerator::concentric_hyper_shells.
        colorize             If true, then assign different boundary ids to different parts of the boundary. For more information on boundary indicators see this glossary entry. The left boundary (at $x = 0$) is assigned an id of $0$, the right boundary (at $x = 2.2$) is assigned an id of $1$; the boundary of the obstacle in the middle (i.e., the circle in 2d or the cylinder walls in 3d) is assigned an id of $2$, and the channel walls are assigned an id of $3$.
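
        A sketch using the documented default shell width (the skewness value of 2.0 is an assumption, not taken from the table above):

        Triangulation<2> triangulation;
        GridGenerator::channel_with_cylinder(triangulation,
                                             0.03,   // shell_region_width
                                             2,      // n_shells
                                             2.0,    // skewness
                                             true);  // colorize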
        @@ -1123,12 +1123,12 @@
    const unsigned int n_rotate_middle_square

    Generate a 2d mesh consisting of five squares arranged in a plus-shape. Depending on the number n_rotate_middle_square passed the middle square is rotated by a degree of n_rotate_middle_square $\pi/2$. This way one can generate a mesh in which the middle square contains edges that have the opposite tangential and/or opposite normal orientation compared to the neighboring edges of the other squares.

    +

    Generate a 2d mesh consisting of five squares arranged in a plus-shape. Depending on the number n_rotate_middle_square passed the middle square is rotated by a degree of n_rotate_middle_square $\pi/2$. This way one can generate a mesh in which the middle square contains edges that have the opposite tangential and/or opposite normal orientation compared to the neighboring edges of the other squares.

    This mesh is not overly useful from a practical point of view. For debugging purposes it can be used to check for orientation issues for vector- or tensor-valued finite elements.

    Parameters
    [out] tria                     The input triangulation.
    [in]  n_rotate_middle_square   Number of rotations in [0,4) of the right square by $\pi/2$.
    @@ -1165,7 +1165,7 @@
    const bool manipulate_left_cube

    Generate a 3d mesh consisting of the unit cube joined with a copy shifted by $s = (1,0,0)$. Depending on the flags passed either the right or the left cube (when looking at the positively oriented (x,z)-plane) contains a face that is either not in standard orientation and/or is rotated by either $\pi/2$, $\pi$ or $3/2\pi$.

    +

    Generate a 3d mesh consisting of the unit cube joined with a copy shifted by $s = (1,0,0)$. Depending on the flags passed either the right or the left cube (when looking at the positively oriented (x,z)-plane) contains a face that is either not in standard orientation and/or is rotated by either $\pi/2$, $\pi$ or $3/2\pi$.

    This mesh is not overly useful from a practical point of view. For debugging purposes it can be used to check for orientation issues for vector- or tensor-valued finite elements.

    Parameters
    @@ -1316,7 +1316,7 @@
    const double half_length = 1.

    Create a dim dimensional cylinder where the $x$-axis serves as the axis of the cylinder. For the purposes of this function, a cylinder is defined as a (dim - 1) dimensional disk of given radius, extruded along the axis of the cylinder (which is the first coordinate direction). Consequently, in three dimensions, the cylinder extends from x=-half_length to x=+half_length and its projection into the yz-plane is a circle of radius radius. In two dimensions, the cylinder is a rectangle from x=-half_length to x=+half_length and from y=-radius to y=radius.

    +

    Create a dim dimensional cylinder where the $x$-axis serves as the axis of the cylinder. For the purposes of this function, a cylinder is defined as a (dim - 1) dimensional disk of given radius, extruded along the axis of the cylinder (which is the first coordinate direction). Consequently, in three dimensions, the cylinder extends from x=-half_length to x=+half_length and its projection into the yz-plane is a circle of radius radius. In two dimensions, the cylinder is a rectangle from x=-half_length to x=+half_length and from y=-radius to y=radius.

    The boundaries are colored according to the following scheme: 0 for the hull of the cylinder, 1 for the left hand face and 2 for the right hand face (see the glossary entry on colorization).

    The manifold id for the hull of the cylinder is set to zero, and a CylindricalManifold is attached to it.

    Precondition
    The triangulation passed as argument needs to be empty when calling this function.
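
A minimal usage sketch for this function (assuming it is GridGenerator::cylinder; the radius and half-length values are illustrative):

Triangulation<3> tria;
// Cylinder of radius 1 around the x-axis, extending from x=-2 to x=+2;
// boundary ids 0 (hull), 1 (left face), 2 (right face) as described above.
GridGenerator::cylinder(tria, 1., 2.);
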
const double half_length = 1.

    Create a dim dimensional cylinder where the $x$-axis serves as the axis of the cylinder. For the purposes of this function, a cylinder is defined as a (dim - 1) dimensional disk of given radius, extruded along the axis of the cylinder (which is the first coordinate direction). Consequently, in three dimensions, the cylinder extends from x=-half_length to x=+half_length and its projection into the yz-plane is a circle of radius radius. In two dimensions, the cylinder is a rectangle from x=-half_length to x=+half_length and from y=-radius to y=radius. This function is only implemented for dim==3.

    The boundaries are colored according to the following scheme: 0 for the hull of the cylinder, 1 for the left hand face and 2 for the right hand face (see the glossary entry on colorization).

    The manifold id for the hull of the cylinder is set to zero, and a CylindricalManifold is attached to it.

tria An empty triangulation which will hold the pipe junction geometry.
openings Center point and radius of each of the three openings. The container has to be of size three.
bifurcation Center point of the bifurcation and hypothetical radius of each truncated cone at the bifurcation.
aspect_ratio Aspect ratio of cells, specified as radial over z-extension. Default ratio is $\Delta r/\Delta z = 1/2$.
Point Radius
Openings $(2,0,0)$ $1$
$(0,2,0)$ $1$
$(0,0,2)$ $1$
Bifurcation $(0,0,0)$ $1$

Point Radius
Openings $(-2,0,0)$ $1$
$(0,2,0)$ $1$
$(2,0,0)$ $1$
Bifurcation $(0,0,0)$ $1$

Point Radius
Openings $(-2,0,0)$ $1$
$(1,\sqrt{3},0)$ $1$
$(1,-\sqrt{3},0)$ $1$
Bifurcation $(0,0,0)$ $1$

    Definition at line 266 of file grid_generator_pipe_junction.cc.
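
A usage sketch reproducing the first configuration above (assuming the function is GridGenerator::pipe_junction with the parameters listed):

Triangulation<3> tria;
// Three openings of radius 1 on the coordinate axes, bifurcating at the
// origin, as in the first table.
const std::vector<std::pair<Point<3>, double>> openings =
  {{Point<3>(2, 0, 0), 1.}, {Point<3>(0, 2, 0), 1.}, {Point<3>(0, 0, 2), 1.}};
const std::pair<Point<3>, double> bifurcation(Point<3>(0, 0, 0), 1.);
GridGenerator::pipe_junction(tria, openings, bifurcation);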

    Parameters
tria A Triangulation object which has to be empty.
sizes A vector of integers of dimension GeometryInfo<dim>::faces_per_cell with the following meaning: the legs of the cross are stacked on the faces of the center cell, in the usual order of deal.II cells, namely first $-x$, then $x$, then $-y$ and so on. The corresponding entries in sizes name the number of cells stacked on this face. All numbers may be zero, thus L- and T-shaped domains are specializations of this domain.
colorize_cells If colorization is enabled, then the material id of a cell corresponds to the leg it is in. The id of the center cell is zero, and then the legs are numbered starting at one (see the glossary entry on colorization).
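
A usage sketch (assuming this documents GridGenerator::hyper_cross; the sizes are illustrative):

Triangulation<2> tria;
// One cell on each of the four legs, in the face order -x, +x, -y, +y.
const std::vector<unsigned int> sizes = {1, 1, 1, 1};
GridGenerator::hyper_cross(tria, sizes, /*colorize_cells=*/true);
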
  • 96 for the rhombic dodecahedron refined once. This choice dates from an older version of deal.II before the Manifold classes were implemented: today this choice is equivalent to the rhombic dodecahedron after performing one global refinement.
  • Numbers of the kind $192\times 2^m$ with $m\geq 0$ integer. This choice is similar to the 24 and 48 cell cases, but provides additional refinements in azimuthal direction combined with a single layer in radial direction. The base mesh is either the 6 or 12 cell version, depending on whether $m$ in the power is odd or even, respectively.

    The versions with 24, 48, and $2^m 192$ cells are useful if the shell is thin and the radial lengths should be made more similar to the circumferential lengths.
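
A usage sketch for a thin shell where such a cell count is appropriate (assuming this documents GridGenerator::hyper_shell; the radii are illustrative):

Triangulation<3> tria;
// Thin shell: 96 cells keep radial and circumferential lengths similar.
GridGenerator::hyper_shell(tria, Point<3>(), /*inner_radius=*/0.9,
                           /*outer_radius=*/1.0, /*n_cells=*/96);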

    The 3d grids with 12 and 96 cells are plotted below:

const bool colorize = false

    Produce a domain that is the intersection between a hyper-shell with given inner and outer radius, i.e. the space between two circles in two space dimensions and the region between two spheres in 3d, and the positive quadrant (in 2d) or octant (in 3d). In 2d, this is indeed a quarter of the full annulus, while the function is a misnomer in 3d because there the domain is not a quarter but one eighth of the full shell.

    If the number of initial cells is zero (as is the default), then it is computed adaptively such that the resulting elements have the least aspect ratio in 2d.

    If colorize is set to true, the inner, outer, left, and right boundary get indicator 0, 1, 2, and 3 in 2d, respectively. Otherwise all indicators are set to 0. In 3d indicator 2 is at the face $x=0$, 3 at $y=0$, 4 at $z=0$ (see the glossary entry on colorization).

    All manifold ids are set to zero, and a SphericalManifold is attached to the triangulation.

    Precondition
    The triangulation passed as argument needs to be empty when calling this function.
const bool colorize = false

    Produce a domain that is the space between two cylinders in 3d, with given length, inner and outer radius and a given number of elements. The cylinder shell is built around the $z$-axis with the two faces located at $z = 0$ and $z = $ length.

    If n_radial_cells is zero (as is the default), then it is computed adaptively such that the resulting elements have the least aspect ratio. The same holds for n_axial_cells.

    If colorize is set to true, a boundary id of 0 is set for the inner cylinder, a boundary id of 1 is set for the outer cylinder, a boundary id of 2 is set for the bottom (z-) boundary and a boundary id of 3 is set for the top (z+) boundary.

    Note
    Although this function is declared as a template, it does not make sense in 1d and 2d. Also keep in mind that this object is rotated and positioned differently than the one created by cylinder().
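
A usage sketch (assuming this documents GridGenerator::cylinder_shell; cell counts are left at their adaptive defaults):

Triangulation<3> tria;
// Shell between radii 0.5 and 1 around the z-axis, from z=0 to z=3.
GridGenerator::cylinder_shell(tria, /*length=*/3., /*inner_radius=*/0.5,
                              /*outer_radius=*/1.);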

    Produce the volume or surface mesh of a torus. The axis of the torus is the $y$-axis while the plane of the torus is the $x$- $z$ plane.

    If dim is 3, the mesh will be the volume of the torus, using a mesh equivalent to the circle in the poloidal coordinates with 5 cells on the cross section. This function attaches a TorusManifold to all boundary faces which are marked with a manifold id of 1, a CylindricalManifold to the interior cells and all their faces which are marked with a manifold id of 2 (representing a flat state within the poloidal coordinates), and a TransfiniteInterpolationManifold to the cells between the TorusManifold on the surface and the ToroidalManifold in the center, with cells marked with manifold id 0.

    An example for the case if dim is 3 with a cut through the domain at $z=0$, 6 toroidal cells, $R=2$ and $r=0.5$ without any global refinement is given here:

centerline_radius The radius of the circle which forms the center line of the torus containing the loop of cells. Must be greater than inner_radius.
inner_radius The distance between the inner edge of the torus and origin.
n_cells_toroidal Optional argument to set the number of cell layers in toroidal direction. The default is 6 cell layers.
phi Optional argument to generate an open torus with angle $0 < \varphi \leq 2 \pi$. The default value is $2 \pi$, in which case a closed torus is generated. If the torus is open, the torus is cut at two planes perpendicular to the torus centerline. The center of these two planes are located at $(x_1, y_1, z_1) = (R, 0, 0)$ and $(x_2, y_2, z_2) = (R \cos(\varphi), 0, R \sin(\varphi))$.
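
A usage sketch matching the example above, $R=2$ and $r=0.5$ (assuming this documents GridGenerator::torus):

Triangulation<3> tria;
// Closed torus with centerline radius R=2 and inner radius r=0.5, using
// the default of 6 toroidal cell layers.
GridGenerator::torus(tria, /*centerline_radius=*/2., /*inner_radius=*/0.5);
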
/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html differs (HTML document, ASCII text, with very long lines)

const unsigned int order = 2

This function flags cells of a triangulation for refinement with the aim to reach a grid that is optimal with respect to an objective function that tries to balance reducing the error and increasing the numerical cost when the mesh is refined. Specifically, this function makes the assumption that if you refine a cell $K$ with error indicator $\eta_K$ provided by the second argument to this function, then the error on the children (for all children together) will only be $2^{-\text{order}}\eta_K$, where order is the third argument of this function. This makes the assumption that the error is only a local property on a mesh and can be reduced by local refinement – an assumption that is true for the interpolation operator, but not for the usual Galerkin projection, although it is approximately true for elliptic problems where the Green's function decays quickly and the error here is not too much affected by a too coarse mesh somewhere else.

    With this, we can define the objective function this function tries to optimize. Let us assume that the mesh currently has $N_0$ cells. Then, if we refine the $m$ cells with the largest errors, we expect to get (in $d$ space dimensions)

    \[
   N(m) = (N_0-m) + 2^d m = N_0 + (2^d-1)m
 \]

cells ($N_0-m$ cells are not refined, and each of the $m$ cells we refine yields $2^d$ child cells). On the other hand, with refining $m$ cells, and using the assumptions above, we expect that the error will be

\[
   \eta^\text{exp}(m)
   =
   \sum_{K, K\; \text{will not be refined}} \eta_K
   +
   \sum_{K, K\; \text{will be refined}} 2^{-\text{order}}\eta_K
 \]

    where the first sum extends over $N_0-m$ cells and the second over the $m$ cells that will be refined. Note that $N(m)$ is an increasing function of $m$ whereas $\eta^\text{exp}(m)$ is a decreasing function.

    This function then tries to find that number $m$ of cells to mark for refinement for which the objective function

\[
   J(m) = N(m)^{\text{order}/d} \eta^\text{exp}(m)
 \]

is minimized.

The rationale for this function is two-fold. First, compared to the refine_and_coarsen_fixed_fraction() and refine_and_coarsen_fixed_number() functions, this function has the property that if all refinement indicators are the same (i.e., we have achieved a mesh where the error per cell is equilibrated), then the entire mesh is refined. This is based on the observation that a mesh with equilibrated error indicators is the optimal mesh (i.e., has the least overall error) among all meshes with the same number of cells. (For proofs of this, see R. Becker, M. Braack, R. Rannacher: "Numerical simulation of laminar flames at low Mach number with adaptive finite elements", Combustion Theory and Modelling, Vol. 3, No. 3, pp. 503-534, 1999; and W. Bangerth, R. Rannacher: "Adaptive Finite Element Methods for Differential Equations", Birkhäuser, 2003.)

Second, the function uses the observation that ideally, the error behaves like $e \approx c N^{-\alpha}$ with some constant $\alpha$ that depends on the dimension and the finite element degree. Given optimal mesh refinement, it should not depend strongly on the regularity of the solution, as it is based on the idea that all singularities can be resolved by refinement. Mesh refinement is then based on the idea that we want to make $c=e N^\alpha$ small. This corresponds to the functional $J(m)$ above.

    Note
    This function was originally implemented by Thomas Richter. It follows a strategy described in [Richter2005]. See in particular Section 4.3, pp. 42-43.

    Definition at line 447 of file grid_refinement.cc.
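
A usage sketch (the error indicators are assumed to come from an estimator such as KellyErrorEstimator; names are illustrative):

Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
// ... fill estimated_error_per_cell, e.g. with KellyErrorEstimator ...
GridRefinement::refine_and_coarsen_optimize(triangulation,
                                            estimated_error_per_cell,
                                            /*order=*/2);
triangulation.execute_coarsening_and_refinement();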

/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html differs (HTML document, ASCII text, with very long lines)

Triangulation< dim, spacedim > & triangulation

    Transform the vertices of the given triangulation by applying the function object provided as first argument to all its vertices.

The transformation given as argument is used to transform each vertex. Its respective type has to offer a function-like syntax, i.e. the predicate is either an object of a type that has an operator(), or it is a pointer to a non-member function, or it is a lambda function object. In either case, argument and return value have to be of type Point<spacedim>. An example – a simple transformation that moves the object two units to the right in the $x_1$ direction – could look as follows:

Triangulation<dim> triangulation;
... // fill triangulation with something
GridTools::transform(
  [](const Point<dim> &p) -> Point<dim>
  {
    // The body of this example was cut off in the diff; this is a
    // plausible completion shifting each vertex by two units in x_1.
    Point<dim> q = p;
    q[0] += 2.;
    return q;
  },
  triangulation);

const bool solve_for_absolute_positions = false

    Transform the given triangulation smoothly to a different domain where, typically, each of the vertices at the boundary of the triangulation is mapped to the corresponding points in the new_points map.

The unknown displacement field $u_d(\mathbf x)$ in direction $d$ is obtained from the minimization problem

\[ \min\, \int \frac{1}{2}
   c(\mathbf x)
   \mathbf \nabla u_d(\mathbf x) \cdot
   \mathbf \nabla u_d(\mathbf x)
   \,\rm d x
\]

    subject to prescribed constraints. The minimizer is obtained by solving the Laplace equation of the dim components of a displacement field that maps the current domain into one described by new_points . Linear finite elements with four Gaussian quadrature points in each direction are used. The difference between the vertex positions specified in new_points and their current value in tria therefore represents the prescribed values of this displacement field at the boundary of the domain, or more precisely at all of those locations for which new_points provides values (which may be at part of the boundary, or even in the interior of the domain). The function then evaluates this displacement field at each unconstrained vertex and uses it to place the mapped vertex where the displacement field locates it. Because the solution of the Laplace equation is smooth, this guarantees a smooth mapping from the old domain to the new one.
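
A minimal sketch (assuming this documents GridTools::laplace_transform; the vertex index and target point are hypothetical):

// Prescribe a new location for vertex 0 and let the Laplace solve place
// all unconstrained vertices smoothly.
std::map<unsigned int, Point<2>> new_points;
new_points[0] = Point<2>(0.1, 0.);
GridTools::laplace_transform(new_points, tria);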

    Parameters

    This function does the same as the previous one, i.e. it partitions a triangulation using a partitioning algorithm into a number of subdomains identified by the cell->subdomain_id() flag.

    The difference to the previous function is the second argument, a sparsity pattern that represents the connectivity pattern between cells.

While the function above builds it directly from the triangulation by considering which cells neighbor each other, this function can take a more refined connectivity graph. The sparsity pattern needs to be of size $N\times N$, where $N$ is the number of active cells in the triangulation. If the sparsity pattern contains an entry at position $(i,j)$, then this means that cells $i$ and $j$ (in the order in which they are traversed by active cell iterators) are to be considered connected; the partitioning algorithm will then try to partition the domain in such a way that (i) the subdomains are of roughly equal size, and (ii) a minimal number of connections are broken.

    This function is mainly useful in cases where connections between cells exist that are not present in the triangulation alone (otherwise the previous function would be the simpler one to use). Such connections may include that certain parts of the boundary of a domain are coupled through symmetric boundary conditions or integrals (e.g. friction contact between the two sides of a crack in the domain), or if a numerical scheme is used that not only connects immediate neighbors but a larger neighborhood of cells (e.g. when solving integral equations).

In addition, this function may be useful in cases where the default sparsity pattern is not entirely sufficient. This can happen because the default is to just consider face neighbors, not neighboring cells that are connected by edges or vertices. While the latter couple when using continuous finite elements, they are typically still closely connected in the neighborship graph, and the partitioning algorithm will not usually cut important connections in this case. However, if there are vertices in the mesh where many cells (many more than the common 4 or 6 in 2d and 3d, respectively) come together, then there will be a significant number of cells that are connected across a vertex, but several degrees removed in the connectivity graph built only using face neighbors. In a case like this, the partitioning algorithm may sometimes make bad decisions and you may want to build your own connectivity graph.

    Note
    If the weight signal has been attached to the triangulation, then this will be used and passed to the partitioner.
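
A sketch of supplying a custom connectivity graph (here seeded with the default face-neighbor graph via GridTools::get_face_connectivity_of_cells, which one would then augment; the partition count is illustrative):

DynamicSparsityPattern cell_connectivity(triangulation.n_active_cells());
GridTools::get_face_connectivity_of_cells(triangulation, cell_connectivity);
// ... add entries for couplings not represented in the mesh itself ...
SparsityPattern connectivity;
connectivity.copy_from(cell_connectivity);
GridTools::partition_triangulation(4, connectivity, triangulation);
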
const FullMatrix< double > & matrix = FullMatrix<double>()

    An orthogonal equality test for faces.

face1 and face2 are considered equal, if a one-to-one matching between their vertices can be achieved via an orthogonal equality relation. If no such relation exists then the returned std::optional object is empty (i.e., has_value() will return false).

    Here, two vertices v_1 and v_2 are considered equal, if $M\cdot v_1 + offset - v_2$ is parallel to the unit vector in unit direction direction. If the parameter matrix is a reference to a spacedim x spacedim matrix, $M$ is set to matrix, otherwise $M$ is the identity matrix.

If the matching was successful, the relative orientation of face1 with respect to face2 is returned as a std::optional<unsigned char>, in which the stored value is the same orientation bit format used elsewhere in the library. More information on that topic can be found in the glossary article.

    Definition at line 2426 of file grid_tools_dof_handlers.cc.


    This function tries to match all faces belonging to the first boundary with faces belonging to the second boundary with the help of orthogonal_equality().

    The unsigned char that is returned inside of PeriodicFacePair encodes the relative orientation of the first face with respect to the second face, see the documentation of orthogonal_equality() for further details.

    The direction refers to the space direction in which periodicity is enforced. When matching periodic faces this vector component is ignored.

    The offset is a vector tangential to the faces that is added to the location of vertices of the 'first' boundary when attempting to match them to the corresponding vertices of the 'second' boundary. This can be used to implement conditions such as $u(0,y)=u(1,y+1)$.

Optionally, a $dim\times dim$ rotation matrix can be specified that describes how vector valued DoFs of the first face should be modified prior to constraining to the DoFs of the second face. The matrix is used in two places. First, matrix will be supplied to orthogonal_equality() and used for matching faces: Two vertices $v_1$ and $v_2$ match if $\text{matrix}\cdot v_1 + \text{offset} - v_2$ is parallel to the unit vector in unit direction direction. (For more details see DoFTools::make_periodicity_constraints(), the glossary entry on periodic conditions and step-45). Second, matrix will be stored in the PeriodicFacePair collection matched_pairs for further use.

    Template Parameters
MeshType A type that satisfies the requirements of the MeshType concept.
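
A usage sketch for collect_periodic_faces (boundary ids 0 and 1 and direction 0 are illustrative; on a colorized hyper_cube these are the two faces orthogonal to the x-axis):

std::vector<GridTools::PeriodicFacePair<
  typename Triangulation<dim>::cell_iterator>> matched_pairs;
// Match faces with boundary id 0 against faces with boundary id 1,
// periodic in the x-direction.
GridTools::collect_periodic_faces(triangulation, 0, 1, 0, matched_pairs);
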
Compute the volume (i.e. the dim-dimensional measure) of the triangulation. We compute the measure using the integral $\sum_K \int_K 1 \; dx$ where $K$ are the cells of the given triangulation. The integral is approximated via quadrature. This version of the function uses a linear mapping to compute the JxW values on each cell.

    If the triangulation is a dim-dimensional one embedded in a higher dimensional space of dimension spacedim, then the value returned is the dim-dimensional measure. For example, for a two-dimensional triangulation in three-dimensional space, the value returned is the area of the surface so described. (This obviously makes sense since the spacedim-dimensional measure of a dim-dimensional triangulation would always be zero if dim < spacedim).

    This function also works for objects of type parallel::distributed::Triangulation, in which case the function is a collective operation.
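
A usage sketch:

// dim-dimensional measure of the mesh, computed with a linear mapping.
const double measure = GridTools::volume(triangulation);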

    Parameters
const Mapping< dim, spacedim > & mapping

Compute the volume (i.e. the dim-dimensional measure) of the triangulation. We compute the measure using the integral $\sum_K \int_K 1 \; dx$ where $K$ are the cells of the given triangulation. The integral is approximated via quadrature for which we use the mapping argument.

If the triangulation is a dim-dimensional one embedded in a higher dimensional space of dimension spacedim, then the value returned is the dim-dimensional measure. For example, for a two-dimensional triangulation in three-dimensional space, the value returned is the area of the surface so described. (This obviously makes sense since the spacedim-dimensional measure of a dim-dimensional triangulation would always be zero if dim < spacedim.)

    This function also works for objects of type parallel::distributed::Triangulation, in which case the function is a collective operation.

    Parameters

This function computes an affine approximation of the map from the unit coordinates to the real coordinates of the form $p_\text{real} = A p_\text{unit} + b$ by a least squares fit of this affine function to the $2^\text{dim}$ vertices representing a quadrilateral or hexahedral cell in spacedim dimensions. The result is returned as a pair with the matrix A as the first argument and the vector b describing the distance of the plane to the origin.

    For any valid mesh cell whose geometry is not degenerate, this operation results in a unique affine mapping, even in cases where the actual transformation by a bi-/trilinear or higher order mapping might be singular. The result is exact in case the transformation from the unit to the real cell is indeed affine, such as in one dimension or for Cartesian and affine (parallelogram) meshes in 2d/3d.

    This approximation is underlying the function TriaAccessor::real_to_unit_cell_affine_approximation() function.

    For exact transformations to the unit cell, use Mapping::transform_real_to_unit_cell().

const Quadrature< dim > & quadrature

    Computes an aspect ratio measure for all locally-owned active cells and fills a vector with one entry per cell, given a triangulation and mapping. The size of the vector that is returned equals the number of active cells. The vector contains zero for non locally-owned cells. The aspect ratio of a cell is defined as the ratio of the maximum to minimum singular value of the Jacobian, taking the maximum over all quadrature points of a quadrature rule specified via quadrature. For example, for the special case of rectangular elements in 2d with dimensions $a$ and $b$ ( $a \geq b$), this function returns the usual aspect ratio definition $a/b$. The above definition using singular values is a generalization to arbitrarily deformed elements. This function is intended to be used for $d=2,3$ space dimensions, but it can also be used for $d=1$ returning a value of 1.

    Note
    Inverted elements do not throw an exception. Instead, a value of inf is written into the vector in case of inverted elements.
    Make sure to use enough quadrature points for a precise calculation of the aspect ratio in case of deformed elements.
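
A usage sketch (assuming this documents GridTools::compute_aspect_ratio_of_cells; the mapping degree and quadrature order are illustrative):

const MappingQ<dim> mapping(1);
const QGauss<dim> quadrature(3);
const Vector<double> ratios =
  GridTools::compute_aspect_ratio_of_cells(mapping, triangulation, quadrature);
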
const double tol = 1e-12

    Remove vertices that are duplicated, due to the input of a structured grid, for example. If these vertices are not removed, the faces bounded by these vertices become part of the boundary, even if they are in the interior of the mesh.

    This function is called by some GridIn::read_* functions. Only the vertices with indices in considered_vertices are tested for equality. This speeds up the algorithm, which is, for worst-case hyper cube geometries $O(N^{3/2})$ in 2d and $O(N^{5/3})$ in 3d: quite slow. However, if you wish to consider all vertices, simply pass an empty vector. In that case, the function fills considered_vertices with all vertices.

    Two vertices are considered equal if their difference in each coordinate direction is less than tol. This implies that nothing happens if the tolerance is set to zero.
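
A sketch of the typical call as made from a mesh reader (assuming the overload taking vertex and cell containers plus SubCellData):

std::vector<Point<3>> vertices;            // as read from a mesh file
std::vector<CellData<3>> cells;            // cell-to-vertex connectivity
SubCellData subcelldata;
std::vector<unsigned int> considered_vertices; // empty: consider all
GridTools::delete_duplicated_vertices(
  vertices, cells, subcelldata, considered_vertices, 1e-12);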

    Definition at line 348 of file grid_tools_topology.cc.

/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLineMinimization.html differs (HTML document, ASCII text, with very long lines)
    Given $x\_low$ and $x\_hi$ together with values of function $f(x\_low)$ and $f(x\_hi)$ and the gradient $g(x\_low)$, return the local minimizer of the quadratic interpolation function.

    The return type is optional to fit with similar functions that may not have a solution for given parameters.

    Given $x\_low$ and $x\_hi$ together with values of function $f(x\_low)$ and $f(x\_hi)$ and its gradients ( $g(x\_low)*g(x\_hi) < 0$) at those points, return the local minimizer of the cubic interpolation function (that is, the location where the cubic interpolation function attains its minimum value).

    The return type is optional as the real-valued solution might not exist.

Find the minimizer of a cubic polynomial that goes through the points $f\_low=f(x\_low)$, $f\_hi=f(x\_hi)$ and $f\_rec(x\_rec)$ and has derivative $g\_low$ at $x\_low$.

    The return type is optional as the real-valued solution might not exist.

const bool debug_output = false

Perform a line search in $(0,max]$ with strong Wolfe conditions

\[
 f(\alpha) \le f(0) + \alpha \mu f'(0) \\
 |f'(\alpha)| \le \eta |f'(0)|
\]

    using the one dimensional function func in conjunction with a function interpolate to choose a new point from the interval based on the function values and derivatives at its ends. The parameter a1 is a trial estimate of the first step. Interpolation can be done using either poly_fit() or poly_fit_three_points(), or any other function that has a similar signature.

    The function implements Algorithms 2.6.2 and 2.6.4 on pages 34-35 in [Fletcher2013]. These are minor variations of Algorithms 3.5 and 3.6 on pages 60-61 in [Nocedal2006]. It consists of a bracketing phase and a zoom phase, where interpolate is used.

    Two examples of use might be as follows: In the first example, we wish to find the minimum of the function $100 * x^4 + (1-x)^2$. To find the approximate solution using line search with a polynomial fit to the curve one would perform the following steps:

auto func = [](const double x)
{
  const double f = 100. * std::pow(x, 4) + std::pow(1. - x, 2); // Value
  // The rest of the lambda was cut off in this diff; a plausible
  // completion consistent with the stated function is:
  const double g = 400. * std::pow(x, 3) - 2. * (1. - x);       // Gradient
  return std::make_pair(f, g);
};
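
The search itself might then be invoked as follows (a sketch assuming poly_fit() as the interpolation routine and a trial step of 0.1; the exact argument list of line_search() should be checked against the signature above):

const auto fg0 = func(0.);
const auto res = LineMinimization::line_search<double>(
  func, fg0.first, fg0.second,
  LineMinimization::poly_fit<double>,
  /*a1=*/0.1);
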
func A one dimensional function which returns value and derivative at the given point.
f0 The function value at the origin.
g0 The function derivative at the origin.
interpolate A function which determines how interpolation is done during the zoom phase. It takes values and derivatives at the current interval/bracket ( $f\_low$, $f\_hi$) as well as up to 5 values and derivatives at previous steps. The returned value is to be provided within the given bounds.
a1 Initial trial step for the bracketing phase.
eta A parameter in the second Wolfe condition (curvature condition).
mu A parameter in the first Wolfe condition (sufficient decrease).

/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html differs (HTML document, ASCII text, with very long lines)

    The namespace L2 contains functions for mass matrices and L2-inner products.

    Notational conventions

    In most cases, the action of a function in this namespace can be described by a single integral. We distinguish between integrals over cells Z and over faces F. If an integral is denoted as

\[
   \int_Z u \otimes v \,dx,
\]

    it will yield the following results, depending on the type of operation

    We will use regular cursive symbols $u$ for scalars and bold symbols $\mathbf u$ for vectors. Test functions are always v and trial functions are always u. Parameters are Greek and the face normal vectors are $\mathbf n = \mathbf n_1 = -\mathbf n_2$.

    Signature of functions

    Functions in this namespace follow a generic signature. In the simplest case, you have two related functions

template <int dim>
void
cell_matrix(FullMatrix<double> &M,
            const FEValuesBase<dim> &fe,
            const double factor = 1.);
// (The remainder of this signature pair was cut off in the diff; the
// argument list above is a representative sketch of the pattern used
// throughout LocalIntegrators.)
/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Advection.html differs (HTML document, ASCII text, with very long lines)

const double factor = 1.

    Advection along the direction w in weak form with derivative on the test function

\[ m_{ij} = \int_Z u_j\,(\mathbf w \cdot \nabla) v_i \, dx. \]

    The FiniteElement in fe may be scalar or vector valued. In the latter case, the advection operator is applied to each component separately.
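
A usage sketch inside a cell worker (the velocity layout follows the ArrayView convention described for the other functions of this namespace: one vector per component, a single entry meaning a constant velocity; names are illustrative):

FullMatrix<double> M(fe_values.dofs_per_cell, fe_values.dofs_per_cell);
// Constant unit velocity in every coordinate direction.
std::vector<std::vector<double>> velocity(dim, std::vector<double>(1, 1.));
LocalIntegrators::Advection::cell_matrix(
  M, fe_values, fe_values, make_array_view(velocity));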

    Parameters

    Scalar advection residual operator in strong form

\[ r_i = \int_Z (\mathbf w \cdot \nabla)u\, v_i \, dx. \]

    Warning
    This is not the residual consistent with cell_matrix(), but with its transpose.

    Vector-valued advection residual operator in strong form

\[ r_i = \int_Z \bigl((\mathbf w \cdot \nabla) \mathbf u\bigr)
\cdot\mathbf v_i \, dx. \]

    Warning
    This is not the residual consistent with cell_matrix(), but with its transpose.

    Scalar advection residual operator in weak form

\[ r_i = \int_Z (\mathbf w \cdot \nabla)v\, u_i \, dx. \]

    Definition at line 215 of file advection.h.


    Vector-valued advection residual operator in weak form

\[ r_i = \int_Z \bigl((\mathbf w \cdot \nabla) \mathbf v\bigr)
\cdot\mathbf u_i \, dx. \]

    Definition at line 255 of file advection.h.

double factor = 1.

    Upwind flux at the boundary for weak advection operator. This is the value of the trial function at the outflow boundary and zero else:

\[
 a_{ij} = \int_{\partial\Omega}
 [\mathbf w\cdot\mathbf n]_+
 u_i v_j \, ds
\]

    The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

    The finite element can have several components, in which case each component is advected by the same velocity.


    Scalar case: Residual for upwind flux at the boundary for weak advection operator. This is the value of the trial function at the outflow boundary and the value of the incoming boundary condition on the inflow boundary:

\[
 a_{ij} = \int_{\partial\Omega}
 (\mathbf w\cdot\mathbf n)
 \widehat u v_j \, ds
\]

    Here, the numerical flux $\widehat u$ is the upwind value at the face, namely the finite element function whose values are given in the argument input on the outflow boundary. On the inflow boundary, it is the inhomogeneous boundary value in the argument data.

    The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

    The finite element can have several components, in which case each component is advected by the same velocity.


    Vector-valued case: Residual for upwind flux at the boundary for weak advection operator. This is the value of the trial function at the outflow boundary and the value of the incoming boundary condition on the inflow boundary:

\[
 a_{ij} = \int_{\partial\Omega}
 (\mathbf w\cdot\mathbf n)
 \widehat u v_j \, ds
\]

    Here, the numerical flux $\widehat u$ is the upwind value at the face, namely the finite element function whose values are given in the argument input on the outflow boundary. On the inflow boundary, it is the inhomogeneous boundary value in the argument data.

    The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

    The finite element can have several components, in which case each component is advected by the same velocity.

const double factor = 1.

    Upwind flux in the interior for weak advection operator. Matrix entries correspond to the upwind value of the trial function, multiplied by the jump of the test functions

\[
 a_{ij} = \int_F \left|\mathbf w
 \cdot \mathbf n\right|
 u^\uparrow
 (v^\uparrow-v^\downarrow)
 \,ds
\]

    The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

    The finite element can have several components, in which case each component is advected the same way.

const double factor = 1.

    Scalar case: Upwind flux in the interior for weak advection operator. Matrix entries correspond to the upwind value of the trial function, multiplied by the jump of the test functions

\[
 a_{ij} = \int_F \left|\mathbf w
 \cdot \mathbf n\right|
 u^\uparrow
 (v^\uparrow-v^\downarrow)
 \,ds
\]

    The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

    The finite element can have several components, in which case each component is advected the same way.

const double factor = 1.

    Vector-valued case: Upwind flux in the interior for weak advection operator. Matrix entries correspond to the upwind value of the trial function, multiplied by the jump of the test functions

\[
 a_{ij} = \int_F \left|\mathbf w
 \cdot \mathbf n\right|
 u^\uparrow
 (v^\uparrow-v^\downarrow)
 \,ds
\]

    The velocity is provided as an ArrayView, having dim vectors, one for each velocity component. Each of the vectors must either have only a single entry, if the advection velocity is constant, or have an entry for each quadrature point.

    The finite element can have several components, in which case each component is advected the same way.

/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html differs (HTML document, ASCII text, with very long lines)

double factor = 1.

    Cell matrix for divergence. The derivative is on the trial function.

\[ \int_Z v\nabla \cdot \mathbf u \,dx \]

    This is the strong divergence operator and the trial space should be at least Hdiv. The test functions may be discontinuous.

const double factor = 1.

    The residual of the divergence operator in strong form.

\[ \int_Z
v\nabla \cdot \mathbf u \,dx \]

    This is the strong divergence operator and the trial space should be at least Hdiv. The test functions may be discontinuous.

    The function cell_matrix() is the Frechet derivative of this function with respect to the test functions.

const double factor = 1.

    The residual of the divergence operator in weak form.

\[ - \int_Z
\nabla v \cdot \mathbf u \,dx \]

    This is the weak divergence operator and the test space should be at least H1. The trial functions may be discontinuous.

    Todo
    Verify: The function cell_matrix() is the Frechet derivative of this function with respect to the test functions.
double factor = 1.

    Cell matrix for gradient. The derivative is on the trial function.

\[
\int_Z \nabla u \cdot \mathbf v\,dx \]

    This is the strong gradient and the trial space should be at least in H1. The test functions can be discontinuous.

const double factor = 1.

    The residual of the gradient operator in strong form.

\[ \int_Z
\mathbf v\cdot\nabla u \,dx \]

    This is the strong gradient operator and the trial space should be at least H1. The test functions may be discontinuous.

    The function gradient_matrix() is the Frechet derivative of this function with respect to the test functions.

const double factor = 1.

    The residual of the gradient operator in weak form.

\[ -\int_Z
\nabla\cdot \mathbf v u \,dx \]

    This is the weak gradient operator and the test space should be at least Hdiv. The trial functions may be discontinuous.

    Todo
    Verify: The function gradient_matrix() is the Frechet derivative of this function with respect to the test functions.
double factor = 1.

    The trace of the divergence operator, namely the product of the normal component of the vector valued trial space and the test space.

\[ \int_F (\mathbf u\cdot \mathbf n) v \,ds \]

    Definition at line 258 of file divergence.h.

double factor = 1.

    The trace of the divergence operator, namely the product of the normal component of the vector valued trial space and the test space.

\[
 \int_F (\mathbf u\cdot \mathbf n) v \,ds
\]

    Definition at line 291 of file divergence.h.

double factor = 1.

    The trace of the gradient operator, namely the product of the normal component of the vector valued test space and the trial space.

\[
 \int_F u (\mathbf v\cdot \mathbf n) \,ds
\]

    Definition at line 323 of file divergence.h.

double factor = 1.

    The trace of the divergence operator, namely the product of the jump of the normal component of the vector valued trial function and the mean value of the test function.

\[
 \int_F (\mathbf u_1\cdot \mathbf n_1 + \mathbf u_2 \cdot \mathbf n_2)
 \frac{v_1+v_2}{2} \,ds
\]

    Definition at line 357 of file divergence.h.

double factor = 1.

    The jump of the normal component

\[
 \int_F
  (\mathbf u_1\cdot \mathbf n_1 + \mathbf u_2 \cdot \mathbf n_2)
  (\mathbf v_1\cdot \mathbf n_1 + \mathbf v_2 \cdot \mathbf n_2)
 \,ds
\]

    Definition at line 416 of file divergence.h.

/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html differs (HTML document, ASCII text, with very long lines)

    The linear elasticity operator in weak form, namely double contraction of symmetric gradients.

\[ \int_Z \varepsilon(u): \varepsilon(v)\,dx \]

    Definition at line 50 of file elasticity.h.


    Vector-valued residual operator for linear elasticity in weak form

\[ - \int_Z \varepsilon(u): \varepsilon(v) \,dx \]

    Definition at line 83 of file elasticity.h.


    The matrix for the weak boundary condition of Nitsche type for linear elasticity:

\[
 \int_F \Bigl(\gamma u \cdot v - n^T \epsilon(u) v - u \epsilon(v)
 n\Bigr)\;ds.
\]

    Definition at line 122 of file elasticity.h.


    The matrix for the weak boundary condition of Nitsche type for the tangential displacement in linear elasticity:

\[
 \int_F \Bigl(\gamma u_\tau \cdot v_\tau - n^T \epsilon(u_\tau) v_\tau -
 u_\tau^T \epsilon(v_\tau) n\Bigr)\;ds.
\]

    Definition at line 177 of file elasticity.h.

double factor = 1.

    Weak boundary condition for the elasticity operator by Nitsche, namely on the face F the vector

\[
 \int_F \Bigl(\gamma (u-g) \cdot v - n^T \epsilon(u) v - (u-g) \epsilon(v)
 n^T\Bigr)\;ds.
\]

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $n$ is the outer normal vector and $\gamma$ is the usual penalty parameter.

    Definition at line 256 of file elasticity.h.


    The weak boundary condition of Nitsche type for the tangential displacement in linear elasticity:

\[
 \int_F \Bigl(\gamma (u_\tau-g_\tau) \cdot v_\tau - n^T \epsilon(u_\tau) v
 - (u_\tau-g_\tau) \epsilon(v_\tau) n\Bigr)\;ds.
\]

    Definition at line 308 of file elasticity.h.

double factor = 1.

    Homogeneous weak boundary condition for the elasticity operator by Nitsche, namely on the face F the vector

\[
 \int_F \Bigl(\gamma u \cdot v - n^T \epsilon(u) v - u \epsilon(v)
 n^T\Bigr)\;ds.
\]

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. $n$ is the outer normal vector and $\gamma$ is the usual penalty parameter.

    +

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. $n$ is the outer normal vector and $\gamma$ is the usual penalty parameter.

    Definition at line 386 of file elasticity.h.

/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html 2024-12-27 18:25:16.840928918 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html 2024-12-27 18:25:16.848928973 +0000 @@ -150,9 +150,9 @@ double factor = 1.

    The weak form of the grad-div operator penalizing volume changes

\[
  \int_Z \nabla\cdot u \nabla \cdot v \,dx
\]

    Definition at line 51 of file grad_div.h.
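A hedged companion sketch (signature assumed to mirror the other LocalIntegrators cell matrices):

#include <deal.II/integrators/grad_div.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/lac/full_matrix.h>

// Hedged sketch: add factor * int_Z (div u)(div v) dx to the local matrix M
// for a vector-valued element, penalizing volume changes as described above.
template <int dim>
void local_grad_div_matrix(const dealii::FEValuesBase<dim> &fe_values,
                           dealii::FullMatrix<double>      &M,
                           const double                     factor = 1.)
{
  dealii::LocalIntegrators::GradDiv::cell_matrix(M, fe_values, factor);
}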

@@ -187,9 +187,9 @@ const double factor = 1.

    The weak form of the grad-div residual

\[
  \int_Z \nabla\cdot u \nabla \cdot v \,dx
\]

    Definition at line 85 of file grad_div.h.

    @@ -231,10 +231,10 @@

    The matrix for the weak boundary condition of Nitsche type for linear elasticity:

\[
 \int_F \Bigl(\gamma (u \cdot n)(v \cdot n) - \nabla\cdot u
 v\cdot n - u \cdot n \nabla \cdot v \Bigr)\;ds.
\]

    Definition at line 121 of file grad_div.h.

@@ -284,14 +284,14 @@ double factor = 1.

    Weak boundary condition for the Laplace operator by Nitsche, vector valued version, namely on the face F the vector

\[
 \int_F \Bigl(\gamma (\mathbf u \cdot \mathbf n- \mathbf g \cdot
 \mathbf n) (\mathbf v \cdot \mathbf n)
 - \nabla \cdot \mathbf u (\mathbf v \cdot \mathbf n)
 - (\mathbf u-\mathbf g) \cdot \mathbf n \nabla \cdot v\Bigr)\;ds.
\]

Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

    Definition at line 173 of file grad_div.h.

@@ -418,12 +418,12 @@ double ext_factor = -1.

    Grad-div residual term for the symmetric interior penalty method:

\[
 \int_F \Bigl( \gamma [\mathbf u \cdot\mathbf n]
 \cdot[\mathbf v \cdot \mathbf n]
 - \{\nabla \cdot \mathbf u\}[\mathbf v\cdot \mathbf n]
 - [\mathbf u\times \mathbf n]\{\nabla\cdot \mathbf v\} \Bigr) \; ds.
\]

    See for instance Hansbo and Larson, 2002

/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html 2024-12-27 18:25:16.872929138 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html 2024-12-27 18:25:16.876929166 +0000 @@ -249,7 +249,7 @@
result: The vector obtained as result.
fe: The FEValues object describing the local trial function space. update_values and update_JxW_values must be set.
input: The representation of $f$ evaluated at the quadrature points in the finite element (size must be equal to the number of quadrature points in the element).
factor: A constant that multiplies the result.
@@ -351,7 +351,7 @@ const double factor2 = 1.

The jump matrix between two cells for scalar or vector valued finite elements. Note that the factor $\gamma$ can be used to implement weighted jumps.

    \[ \int_F [\gamma u][\gamma v]\,ds \quad \text{or}
 \int_F [\gamma \mathbf u]\cdot [\gamma \mathbf v]\,ds \]

/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html 2024-12-27 18:25:16.912929413 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html 2024-12-27 18:25:16.916929440 +0000 @@ -165,8 +165,8 @@ const double factor = 1.

    Laplacian in weak form, namely on the cell Z the matrix

\[
\int_Z \nu \nabla u \cdot \nabla v \, dx. \]

    The FiniteElement in fe may be scalar or vector valued. In the latter case, the Laplacian is applied to each component separately.
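A hedged sketch of the corresponding call (the coefficient $\nu$ is assumed to be passed through the scalar factor argument):

#include <deal.II/integrators/laplace.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/lac/full_matrix.h>

// Hedged sketch: local Laplace matrix int_Z nu grad u . grad v dx on one
// cell; per the note above this works for scalar or vector valued elements,
// the Laplacian being applied componentwise in the latter case.
template <int dim>
void local_laplace_matrix(const dealii::FEValuesBase<dim> &fe_values,
                          dealii::FullMatrix<double>      &M,
                          const double                     nu = 1.)
{
  dealii::LocalIntegrators::Laplace::cell_matrix(M, fe_values, nu);
}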

    @@ -210,7 +210,7 @@

    Laplacian residual operator in weak form

\[ \int_Z \nu \nabla u \cdot \nabla v \, dx. \]

    Definition at line 91 of file laplace.h.

    @@ -253,7 +253,7 @@

    Vector-valued Laplacian residual operator in weak form

\[ \int_Z \nu \nabla u : \nabla v \, dx. \]

    Definition at line 118 of file laplace.h.

@@ -288,11 +288,11 @@ double factor = 1.

    Weak boundary condition of Nitsche type for the Laplacian, namely on the face F the matrix

\[
 \int_F \Bigl(\gamma u v - \partial_n u v - u \partial_n v\Bigr)\;ds.
\]

Here, $\gamma$ is the penalty parameter suitably computed with compute_penalty().

    Definition at line 156 of file laplace.h.

@@ -326,12 +326,12 @@ double factor = 1.

    Weak boundary condition of Nitsche type for the Laplacian applied to the tangential component only, namely on the face F the matrix

\[
 \int_F \Bigl(\gamma u_\tau v_\tau - \partial_n u_\tau v_\tau - u_\tau
 \partial_n v_\tau\Bigr)\;ds.
\]

Here, $\gamma$ is the penalty parameter suitably computed with compute_penalty().

    Definition at line 197 of file laplace.h.

@@ -380,12 +380,12 @@ double factor = 1.

    Weak boundary condition for the Laplace operator by Nitsche, scalar version, namely on the face F the vector

\[
 \int_F \Bigl(\gamma (u-g) v - \partial_n u v - (u-g) \partial_n
 v\Bigr)\;ds.
\]

Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

    Definition at line 260 of file laplace.h.

@@ -434,13 +434,13 @@ double factor = 1.

    Weak boundary condition for the Laplace operator by Nitsche, vector valued version, namely on the face F the vector

\[
 \int_F \Bigl(\gamma (\mathbf u- \mathbf g) \cdot \mathbf v
 - \partial_n \mathbf u \cdot \mathbf v
 - (\mathbf u-\mathbf g) \cdot \partial_n \mathbf v\Bigr)\;ds.
\]

Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

    Definition at line 307 of file laplace.h.

@@ -499,10 +499,10 @@ double factor2 = -1.

    Flux for the interior penalty method for the Laplacian, namely on the face F the matrices associated with the bilinear form

\[
 \int_F \Bigl( \gamma [u][v] - \{\nabla u\}[v\mathbf n] - [u\mathbf n]\{\nabla v\} \Bigr) \; ds.
\]

    The penalty parameter should always be the mean value of the penalties needed for stability on each side. In the case of constant coefficients, it can be computed using compute_penalty().

    If factor2 is missing or negative, the factor is assumed the same on both sides. If factors differ, note that the penalty parameter has to be computed accordingly.
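A hedged sketch of the intended call pattern inside a MeshWorker face worker, with the penalty taken from compute_penalty() (argument order assumed; 'deg' is the polynomial degree on each side):

#include <deal.II/integrators/laplace.h>
#include <deal.II/meshworker/dof_info.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/lac/full_matrix.h>

// Hedged sketch: fill the four interior-penalty coupling blocks
// M11..M22 for one interior face shared by two cells.
template <int dim>
void laplace_face_matrices(dealii::MeshWorker::DoFInfo<dim> &dinfo1,
                           dealii::MeshWorker::DoFInfo<dim> &dinfo2,
                           const dealii::FEValuesBase<dim>  &fe1,
                           const dealii::FEValuesBase<dim>  &fe2,
                           dealii::FullMatrix<double>       &M11,
                           dealii::FullMatrix<double>       &M12,
                           dealii::FullMatrix<double>       &M21,
                           dealii::FullMatrix<double>       &M22,
                           const unsigned int                deg)
{
  // Mean-value penalty for constant coefficients, as recommended above.
  const double penalty =
    dealii::LocalIntegrators::Laplace::compute_penalty(dinfo1, dinfo2, deg, deg);
  dealii::LocalIntegrators::Laplace::ip_matrix(M11, M12, M21, M22,
                                               fe1, fe2, penalty);
}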

@@ -564,10 +564,10 @@ double factor2 = -1.

    Flux for the interior penalty method for the Laplacian applied to the tangential components of a vector field, namely on the face F the matrices associated with the bilinear form

\[
 \int_F \Bigl( \gamma [u_\tau][v_\tau] - \{\nabla u_\tau\}[v_\tau\mathbf n] - [u_\tau\mathbf n]\{\nabla v_\tau\} \Bigr) \; ds.
\]

    Warning
    This function is still under development!
@@ -638,10 +638,10 @@ double ext_factor = -1.

    Residual term for the symmetric interior penalty method:

\[
 \int_F \Bigl( \gamma [u][v] - \{\nabla u\}[v\mathbf n] - [u\mathbf n]\{\nabla v\} \Bigr) \; ds.
\]

    Definition at line 543 of file laplace.h.

@@ -711,11 +711,11 @@ double ext_factor = -1.

    Vector-valued residual term for the symmetric interior penalty method:

\[
 \int_F \Bigl( \gamma [\mathbf u]\cdot[\mathbf v]
 - \{\nabla \mathbf u\}[\mathbf v\otimes \mathbf n]
 - [\mathbf u\otimes \mathbf n]\{\nabla \mathbf v\} \Bigr) \; ds.
\]

    Definition at line 610 of file laplace.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html 2024-12-27 18:25:16.944929632 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html 2024-12-27 18:25:16.952929687 +0000 @@ -133,22 +133,22 @@

    Local integrators related to curl operators and their traces.

    We use the following conventions for curl operators. First, in three space dimensions

\[
 \nabla\times \mathbf u = \begin{pmatrix}
   \partial_2 u_3 - \partial_3 u_2 \\
   \partial_3 u_1 - \partial_1 u_3 \\
   \partial_1 u_2 - \partial_2 u_1
 \end{pmatrix}.
\]

In two space dimensions, the curl is obtained by extending a vector u to $(u_1, u_2, 0)^T$ and a scalar p to $(0,0,p)^T$. Computing the nonzero components, we obtain the scalar curl of a vector function and the vector curl of a scalar function. The current implementation exchanges the sign and we have:

\[
  \nabla \times \mathbf u = \partial_1 u_2 - \partial_2 u_1,
  \qquad
  \nabla \times p = \begin{pmatrix}
    \partial_2 p \\ -\partial_1 p
  \end{pmatrix}
\]

    Function Documentation

@@ -174,15 +174,15 @@ const Tensor< 2, dim > & h2

    Auxiliary function. Given the tensors of dim second derivatives, compute the curl of the curl of a vector function. The result in two and three dimensions is:

\[
 \nabla\times\nabla\times \mathbf u = \begin{pmatrix}
 \partial_1\partial_2 u_2 - \partial_2^2 u_1 \\
 \partial_1\partial_2 u_1 - \partial_1^2 u_2
 \end{pmatrix}
\]

    and

\[
 \nabla\times\nabla\times \mathbf u = \begin{pmatrix}
 \partial_1\partial_2 u_2 + \partial_1\partial_3 u_3
 - (\partial_2^2+\partial_3^2) u_1 \\
 \partial_2\partial_3 u_3 + \partial_2\partial_1 u_1
 - (\partial_3^2+\partial_1^2) u_2 \\
 \partial_3\partial_1 u_1 + \partial_3\partial_2 u_2
 - (\partial_1^2+\partial_2^2) u_3
 \end{pmatrix}.
\]

    Note
    The third tensor argument is not used in two dimensions and can for instance duplicate one of the previous.
@@ -227,9 +227,9 @@ const Tensor< 1, dim > & normal

    Auxiliary function. Given dim tensors of first derivatives and a normal vector, compute the tangential curl

\[
 \mathbf n \times \nabla \times u.
\]

    Note
    The third tensor argument is not used in two dimensions and can for instance duplicate one of the previous.
@@ -260,10 +260,10 @@ const double factor = 1.

    The curl-curl operator

\[
 \int_Z \nabla\times u \cdot \nabla \times v \,dx
\]

    in weak form.
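A hedged sketch of the corresponding call (signature assumed to follow the LocalIntegrators convention):

#include <deal.II/integrators/maxwell.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/lac/full_matrix.h>

// Hedged sketch: local curl-curl matrix int_Z (curl u).(curl v) dx,
// following the curl conventions listed at the top of this namespace.
template <int dim>
void local_curl_curl_matrix(const dealii::FEValuesBase<dim> &fe_values,
                            dealii::FullMatrix<double>      &M,
                            const double                     factor = 1.)
{
  dealii::LocalIntegrators::Maxwell::curl_curl_matrix(M, fe_values, factor);
}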

@@ -299,9 +299,9 @@ double factor = 1.

    The matrix for the curl operator

\[
 \int_Z \nabla \times u \cdot v \,dx.
\]

    This is the standard curl operator in 3d and the scalar curl in 2d. The vector curl operator can be obtained by exchanging test and trial functions.

@@ -343,14 +343,14 @@ double factor = 1.

    The matrix for weak boundary condition of Nitsche type for the tangential component in Maxwell systems.

\[
 \int_F \biggl( 2\gamma (u\times n)(v\times n)
 - (u\times n)(\nu \nabla\times v)
 - (v\times n)(\nu \nabla\times u) \biggr)
\]

    Definition at line 266 of file maxwell.h.

@@ -380,10 +380,10 @@ double factor = 1.

    The product of two tangential traces,

\[
 \int_F (u\times n)(v\times n) \, ds.
\]

    Definition at line 329 of file maxwell.h.

    @@ -451,14 +451,14 @@

    The interior penalty fluxes for Maxwell systems.

\[
 \int_F \biggl( \gamma \{u\times n\}\{v\times n\}
 - \{u\times n\}\{\nu \nabla\times v\}
 - \{v\times n\}\{\nu \nabla\times u\} \biggr)\;dx
\]

    Definition at line 386 of file maxwell.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html 2024-12-27 18:25:16.988929935 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html 2024-12-27 18:25:16.992929962 +0000 @@ -175,8 +175,8 @@
Type describing how a cell or a face is located relative to the zero contour of a level set function, $\psi$. The values of the type correspond to:

inside if $\psi(x) < 0$, outside if $\psi(x) > 0$, intersected if $\psi(x)$ varies in sign,

    over the cell/face. The value "unassigned" is used to describe that the location of a cell/face has not yet been determined.

    @@ -242,7 +242,7 @@
    Enumerator
    inside 
const AffineConstraints< number > & immersed_constraints = AffineConstraints<number>()

    Create a coupling sparsity pattern for non-matching, overlapping grids.

Given two non-matching triangulations, representing the domains $\Omega$ and $B$, with $B \subseteq \Omega$, and two finite element spaces $V(\Omega) = \text{span}\{v_i\}_{i=0}^n$ and $Q(B) = \text{span}\{w_j\}_{j=0}^m$, compute the sparsity pattern that would be necessary to assemble the matrix

\[
 M_{ij} \dealcoloneq \int_{B} v_i(x) w_j(x) dx,
\]

    where $V(\Omega)$ is the finite element space associated with the space_dh passed to this function (or part of it, if specified in space_comps), while $Q(B)$ is the finite element space associated with the immersed_dh passed to this function (or part of it, if specified in immersed_comps).

The sparsity is filled by locating the position of quadrature points (obtained by the reference quadrature quad) defined on elements of $B$ with respect to the embedding triangulation $\Omega$. For each overlapping cell, the entries corresponding to space_comps in space_dh and immersed_comps in immersed_dh are added to the sparsity pattern.

    The space_comps and immersed_comps masks are assumed to be ordered in the same way: the first component of space_comps will couple with the first component of immersed_comps, the second with the second, and so on. If one of the two masks has more non-zero than the other, then the excess components will be ignored.

If the domain $B$ does not fall within $\Omega$, an exception will be thrown by the algorithm that computes the quadrature point locations. In particular, notice that this function only makes sense for dim1 less than or equal to dim0. A static assert guards that this is actually the case.

    For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

    This function will also work in parallel, provided that the immersed triangulation is of type parallel::shared::Triangulation<dim1,spacedim>. An exception is thrown if you use an immersed parallel::distributed::Triangulation<dim1,spacedim>.

    See the tutorial program step-60 for an example on how to use this function.
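A hedged, step-60-style sketch of the call (argument order assumed from the description above; the quadrature degree is illustrative):

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/non_matching/coupling.h>

// Hedged sketch: build the coupling sparsity between V(Omega) and Q(B).
// One row per space dof, one column per immersed dof.
template <int dim0, int dim1, int spacedim>
void build_coupling_sparsity(const dealii::DoFHandler<dim0, spacedim> &space_dh,
                             const dealii::DoFHandler<dim1, spacedim> &immersed_dh,
                             const dealii::AffineConstraints<double>  &constraints,
                             dealii::DynamicSparsityPattern           &dsp)
{
  dsp.reinit(space_dh.n_dofs(), immersed_dh.n_dofs());
  dealii::NonMatching::create_coupling_sparsity_pattern(
    space_dh, immersed_dh,
    dealii::QGauss<dim1>(2), // reference quadrature on the immersed cells
    dsp, constraints);
}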

    @@ -370,7 +370,7 @@
const AffineConstraints< typename Matrix::value_type > & immersed_constraints = AffineConstraints<typename Matrix::value_type>()

    Create a coupling mass matrix for non-matching, overlapping grids.

Given two non-matching triangulations, representing the domains $\Omega$ and $B$, with $B \subseteq \Omega$, and two finite element spaces $V(\Omega) = \text{span}\{v_i\}_{i=0}^n$ and $Q(B) = \text{span}\{w_j\}_{j=0}^m$, compute the coupling matrix

\[
 M_{ij} \dealcoloneq \int_{B} v_i(x) w_j(x) dx,
\]

    where $V(\Omega)$ is the finite element space associated with the space_dh passed to this function (or part of it, if specified in space_comps), while $Q(B)$ is the finite element space associated with the immersed_dh passed to this function (or part of it, if specified in immersed_comps).

The corresponding sparsity patterns can be computed by calling the make_coupling_sparsity_pattern function. The elements of the matrix are computed by locating the position of quadrature points defined on elements of $B$ with respect to the embedding triangulation $\Omega$.

The space_comps and immersed_comps masks are assumed to be ordered in the same way: the first component of space_comps will couple with the first component of immersed_comps, the second with the second, and so on. If one of the two masks has more non-zero entries than the other, then the excess components will be ignored.

If the domain $B$ does not fall within $\Omega$, an exception will be thrown by the algorithm that computes the quadrature point locations. In particular, notice that this function only makes sense for dim1 less than or equal to dim0. A static assert guards that this is actually the case.

    For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

    This function will also work in parallel, provided that the immersed triangulation is of type parallel::shared::Triangulation<dim1,spacedim>. An exception is thrown if you use an immersed parallel::distributed::Triangulation<dim1,spacedim>.

    See the tutorial program step-60 for an example on how to use this function.
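A hedged continuation of the sparsity sketch above, assembling the coupling matrix itself (argument order again assumed):

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/non_matching/coupling.h>

// Hedged sketch: once a SparsityPattern has been copied from the
// DynamicSparsityPattern, fill M_ij = int_B v_i(x) w_j(x) dx.
template <int dim0, int dim1, int spacedim>
void build_coupling_matrix(const dealii::DoFHandler<dim0, spacedim> &space_dh,
                           const dealii::DoFHandler<dim1, spacedim> &immersed_dh,
                           const dealii::AffineConstraints<double>  &constraints,
                           dealii::SparseMatrix<double>             &coupling_matrix)
{
  dealii::NonMatching::create_coupling_mass_matrix(
    space_dh, immersed_dh, dealii::QGauss<dim1>(2),
    coupling_matrix, constraints);
}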

@@ -513,7 +513,7 @@ \quad i \in [0,n), \alpha \in [0,m), \]

where $V^0(\Omega^0)$ is the finite element space associated with the dh0 passed to this function (or part of it, if specified in comps0), while $V^1(\Omega^1)$ is the finite element space associated with the dh1 passed to this function (or part of it, if specified in comps1), and $K^\epsilon$ is a function derived from CutOffFunctionBase with compact support included in a ball of radius $\epsilon$.

    The comps0 and comps1 masks are assumed to be ordered in the same way: the first component of comps0 will couple with the first component of comps1, the second with the second, and so on. If one of the two masks has more active components than the other, then the excess components will be ignored.

    For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

This function will also work in parallel, provided that at least one of the triangulations is of type parallel::shared::Triangulation<dim1,spacedim>. An exception is thrown if both triangulations are of type parallel::distributed::Triangulation<dim1,spacedim>.

@@ -596,7 +596,7 @@ \quad i \in [0,n), \alpha \in [0,m), \]

where $V^0(\Omega^0)$ is the finite element space associated with the dh0 passed to this function (or part of it, if specified in comps0), while $V^1(\Omega^1)$ is the finite element space associated with the dh1 passed to this function (or part of it, if specified in comps1), and $K^\epsilon$ is a function derived from CutOffFunctionBase with compact support included in a ball of radius $\epsilon$.

    The corresponding sparsity patterns can be computed by calling the make_coupling_sparsity_pattern() function.

    The comps0 and comps1 masks are assumed to be ordered in the same way: the first component of comps0 will couple with the first component of comps1, the second with the second, and so on. If one of the two masks has more active components than the other, then the excess components will be ignored.

    For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 2024-12-27 18:25:17.028930209 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 2024-12-27 18:25:17.032930237 +0000 @@ -292,7 +292,7 @@
Returns the max/min bounds on the value, taken over all the entries in the incoming vector of FunctionBounds. That is, given the incoming function bounds, $[L_j, U_j]$, this function returns $[L, U]$, where $L = \min_{j} L_j$ and $U = \max_{j} U_j$.

    Definition at line 202 of file quadrature_generator.cc.

    @@ -314,20 +314,20 @@
Finds the best choice of height function direction, given the FunctionBounds for a number of functions $\{\psi_j\}_{j=0}^{n-1}$. Here, "best" is meant in the sense of the implicit function theorem.

Let $J_I$ be the index set of the indefinite functions:

$J_I = \{0,..., n - 1\} \setminus \{ j : |\psi_j| > 0 \}$.

This function converts the incoming bounds to a lower bound, $L_{jk}$, on the absolute value of each component of the gradient:

$|\partial_k \psi_j| > L_{jk}$.

and then returns a coordinate direction, $i$, and a lower bound $L$, such that

\[
 i = \arg \max_{k} \min_{j \in J_I} L_{jk}, \\
 L = \max_{k} \min_{j \in J_I} L_{jk}.
\]

This means $i$ is a coordinate direction such that all functions intersected by the zero contour (i.e. those belonging to $J_I$) fulfill

$|\partial_i \psi_j| > L$.

    Note that the estimated lower bound, $L$, can be zero or negative. This means that no suitable height function direction exists. If all of the incoming functions are positive or negative definite the returned std::optional is non-set.

    Definition at line 276 of file quadrature_generator.cc.
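The argmax-min rule is simple enough to restate as a short sketch (plain C++, an illustration of the rule above rather than the library's internal code):

#include <algorithm>
#include <limits>
#include <optional>
#include <utility>
#include <vector>

// Hedged illustration: L[j][k] is the lower bound on |d_k psi_j| for the
// j-th indefinite function; return the direction i and bound L that
// maximize the worst case over j.
std::optional<std::pair<unsigned int, double>>
best_height_direction(const std::vector<std::vector<double>> &L,
                      const unsigned int                      dim)
{
  if (L.empty())
    return std::nullopt; // all functions definite: nothing to choose
  unsigned int best_k     = 0;
  double       best_bound = -std::numeric_limits<double>::max();
  for (unsigned int k = 0; k < dim; ++k)
    {
      double worst = std::numeric_limits<double>::max();
      for (const auto &bounds : L)
        worst = std::min(worst, bounds[k]);
      if (worst > best_bound)
        {
          best_bound = worst;
          best_k     = k;
        }
    }
  return std::make_pair(best_k, best_bound);
}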

@@ -399,7 +399,7 @@ std::pair< double, double > & value_bounds

Given the incoming lower and upper bounds on the value of a function $[L, U]$, return the minimum/maximum of $[L, U]$ and the function values at the vertices. That is, this function returns

    $[\min(L, L_f), \max(U, U_f)]$,

where $L_f = \min_{v} f(x_v)$, $U_f = \max_{v} f(x_v)$, and $x_v$ is a vertex.

    It is assumed that the incoming function is scalar valued.

    @@ -487,7 +487,7 @@
Return a lower bound, $L_a$, on the absolute value of a function, $f(x)$:

    $L_a \leq |f(x)|$,

    by estimating it from the incoming lower and upper bounds: $L \leq f(x) \leq U$.

    By rewriting the lower and upper bounds as $F - C \leq f(x) \leq F + C$, where $L = F - C$, $U = F + C$ (or $F = (U + L)/2$, $C = (U - L)/2$), we get $|f(x) - F| \leq C$. Using the inverse triangle inequality gives $|F| - |f(x)| \leq |f(x) - F| \leq C$. Thus, $L_a = |F| - C$.
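The derivation reduces to a two-line computation; a hedged numeric restatement (not the library code):

#include <cmath>

// Hedged illustration: from L <= f(x) <= U, set F = (U+L)/2 and
// C = (U-L)/2, so L_a = |F| - C <= |f(x)|. A non-positive result
// carries no information.
double lower_bound_on_abs(const double L, const double U)
{
  const double F = 0.5 * (U + L);
  const double C = 0.5 * (U - L);
  return std::abs(F) - C;
}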

@@ -676,7 +676,7 @@ QPartitioning< dim > & q_partitioning

Let $\{ y_0, ..., y_{n+1} \}$ be such that $[y_0, y_{n+1}]$ is the interval and $\{ y_1, ..., y_n \}$ are the roots. In each subinterval, $[y_i, y_{i+1}]$, distribute points according to the 1D-quadrature rule $\{(x_q, w_q)\}_q$ (quadrature1D). Take the tensor product with the quadrature point $(x, w)$ (point, weight) to create dim-dimensional quadrature points

\[
 X_q = x_I \times (y_i + (y_{i+1} - y_i) x_q),
 W_q = w_I (y_{i+1} - y_i) w_q,
\]
@@ -761,7 +761,7 @@
const std::optional< HeightDirectionData > & height_direction_data

Return the coordinate direction that the box should be split in, assuming that the box should be split in half.

If the box is larger in one coordinate direction, this direction is returned. If the box has the same extent in all directions, we choose the coordinate direction which is closest to being a height-function direction. That is, the direction $i$ that has the least negative estimate of $|\partial_i \psi_j|$. As a last resort, we choose the direction 0, if height_direction_data is non-set.

    Definition at line 1018 of file quadrature_generator.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceOpenCASCADE.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceOpenCASCADE.html 2024-12-27 18:25:17.068930484 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceOpenCASCADE.html 2024-12-27 18:25:17.076930539 +0000 @@ -432,7 +432,7 @@
Perform the intersection of the given topological shape with the plane $c_x x + c_y y + c_z z +c = 0$. The returned topological shape will contain as few bsplines as possible. An exception is thrown if the intersection produces an empty shape.

    Definition at line 427 of file utilities.cc.

@@ -570,7 +570,7 @@ const Mapping< 2, spacedim > & mapping = StaticMappingQ1<2, spacedim>::mapping

    Given a Triangulation and an optional Mapping, create a vector of smooth curves that interpolate the connected parts of the boundary vertices of the Triangulation and return them as a vector of TopoDS_Edge objects.

This function constructs closed Bspline curve objects passing through all vertices of the boundary of the triangulation, with $C^2$ continuity on each vertex except the first, where only $C^1$ continuity is guaranteed.

    The returned curves are ordered with respect to the indices of the faces that make up the triangulation boundary, i.e., the first curve is the one extracted starting from the face with the lowest index, and so on.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html 2024-12-27 18:25:17.096930676 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html 2024-12-27 18:25:17.100930704 +0000 @@ -154,21 +154,21 @@
const ComponentMask & space_comps = {}

    Create an interpolation sparsity pattern for particles.

Given a triangulation representing the domain $\Omega$, a particle handler of particles in $\Omega$, and a scalar finite element space $V(\Omega) = \text{span}\{v_j\}_{j=0}^n$, compute the sparsity pattern that would be necessary to assemble the matrix

\[
 M_{i,j} \dealcoloneq v_j(x_i) ,
\]

    where $V(\Omega)$ is the finite element space associated with the space_dh, and the index i is given by the particle id whose position is x_i.

    In the case of vector valued finite element spaces, the components on which interpolation must be performed can be selected using a component mask. Only primitive finite element spaces are supported.

    When selecting more than one component, the resulting sparsity will have dimension equal to particle_handler.n_global_particles() * mask.n_selected_components() times space_dh.n_dofs(), and the corresponding matrix entries are given by

\[
  M_{(i*n_comps+k),j} \dealcoloneq v_j(x_i) \cdot e_{comp_j},
\]

where comp_j is the only non-zero component of the vector valued basis function v_j (equal to fe.system_to_component_index(j).first), k corresponds to its index within the selected components of the mask, and $e_{comp_j}$ is the unit vector in the direction comp_j.

The sparsity is filled by locating the position of the particle with index i within the particle handler with respect to the embedding triangulation $\Omega$, and coupling it with all the local degrees of freedom specified in the component mask space_comps, following the ordering in which they are selected in the mask space_comps.

If a particle does not fall within $\Omega$, it is ignored, and the corresponding rows of the sparsity will be empty.

    Constraints of the form supported by the AffineConstraints class may be supplied with the constraints argument. The method AffineConstraints::add_entries_local_to_global() is used to fill the final sparsity pattern.

    Definition at line 31 of file utilities.cc.
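A hedged sketch of the call (argument order assumed from the description above):

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/particles/particle_handler.h>
#include <deal.II/particles/utilities.h>

// Hedged sketch: one row per particle, one column per degree of freedom
// of the space V(Omega), for a scalar element.
template <int dim, int spacedim>
void build_particle_sparsity(
  const dealii::DoFHandler<dim, spacedim>                 &space_dh,
  const dealii::Particles::ParticleHandler<dim, spacedim> &particles,
  const dealii::AffineConstraints<double>                 &constraints,
  dealii::DynamicSparsityPattern                          &dsp)
{
  dsp.reinit(particles.n_global_particles(), space_dh.n_dofs());
  dealii::Particles::Utilities::create_interpolation_sparsity_pattern(
    space_dh, particles, dsp, constraints);
}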

    @@ -205,21 +205,21 @@
const ComponentMask & space_comps = {}

    Create an interpolation matrix for particles.

Given a triangulation representing the domain $\Omega$, a particle handler of particles in $\Omega$, and a scalar finite element space $V(\Omega) = \text{span}\{v_j\}_{j=0}^n$, compute the matrix

\[
 M_{ij} \dealcoloneq v_j(x_i) ,
\]

    where $V(\Omega)$ is the finite element space associated with the space_dh, and the index i is given by the particle id whose position is x_i.

    In the case of vector valued finite element spaces, the components on which interpolation must be performed can be selected using a component mask. Only primitive finite element spaces are supported.

    When selecting more than one component, the resulting sparsity will have dimension equal to particle_handler.n_global_particles() * mask.n_selected_components() times space_dh.n_dofs(), and the corresponding matrix entries are given by

\[
  M_{(i*n_comps+k),j} \dealcoloneq v_j(x_i) \cdot e_{comp_j},
\]

where comp_j is the only non-zero component of the vector valued basis function v_j (equal to fe.system_to_component_index(j).first), k corresponds to its index within the selected components of the mask, and $e_{comp_j}$ is the unit vector in the direction comp_j.

The matrix is filled by locating the position of the particle with index i within the particle handler with respect to the embedding triangulation $\Omega$, and coupling it with all the local degrees of freedom specified in the component mask space_comps, following the ordering in which they are selected in the mask space_comps.

If a particle does not fall within $\Omega$, it is ignored, and the corresponding rows of the matrix will be zero.

    Constraints of the form supported by the AffineConstraints class may be supplied with the constraints argument. The method AffineConstraints::distribute_local_to_global() is used to distribute the entries of the matrix to respect the given constraints.

    Definition at line 113 of file utilities.cc.
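A hedged continuation of the sparsity sketch above; once filled, multiplying this matrix with the nodal solution vector evaluates the field at the particle locations:

#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/particles/utilities.h>

// Hedged sketch: fill M_ij = v_j(x_i), assuming the sparsity pattern was
// built as in the previous snippet.
template <int dim, int spacedim>
void build_particle_matrix(
  const dealii::DoFHandler<dim, spacedim>                 &space_dh,
  const dealii::Particles::ParticleHandler<dim, spacedim> &particles,
  const dealii::AffineConstraints<double>                 &constraints,
  dealii::SparseMatrix<double>                            &M)
{
  dealii::Particles::Utilities::create_interpolation_matrix(
    space_dh, particles, M, constraints);
}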

/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html 2024-12-27 18:25:17.144931006 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html 2024-12-27 18:25:17.152931061 +0000 @@ -196,7 +196,7 @@ \end{array} \right] , \]

where $n$ denotes the Kelvin index for the tensor component, while for a general rank-2 tensor $\mathbf{T}$

\[
 \mathbf{T} \dealcoloneq \left[ \begin{array}{ccc}
  T_{00} & T_{01} & T_{02} \\
  T_{10} & T_{11} & T_{12} \\
  T_{20} & T_{21} & T_{22} \\
 \end{array} \right] ,
\]
/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html	2024-12-27 18:25:17.176931225 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html	2024-12-27 18:25:17.184931280 +0000
@@ -136,7 +136,7 @@

    Detailed Description

    A collection of operations to assist in the transformation of tensor quantities from the reference to spatial configuration, and vice versa. These types of transformation are typically used to re-express quantities measured or computed in one configuration in terms of a second configuration.

    Notation

We will use the same notation for the coordinates $\mathbf{X}, \mathbf{x}$, transformations $\varphi$, differential operator $\nabla_{0}$ and deformation gradient $\mathbf{F}$ as discussed for namespace Physics::Elasticity.

    As a further point on notation, we will follow Holzapfel (2007) and denote the push forward transformation as $\chi\left(\bullet\right)$ and the pull back transformation as $\chi^{-1}\left(\bullet\right)$. We will also use the annotation $\left(\bullet\right)^{\sharp}$ to indicate that a tensor $\left(\bullet\right)$ is a contravariant tensor, and $\left(\bullet\right)^{\flat}$ that it is covariant. In other words, these indices do not actually change the tensor, they just indicate the kind of object a particular tensor is.

    Note
    For these transformations, unless otherwise stated, we will strictly assume that all indices of the transformed tensors derive from one coordinate system; that is to say that they are not multi-point tensors (such as the Piola stress in elasticity).

    Function Documentation

    @@ -205,8 +205,8 @@

    Parameters
[in] V: The vector to be transformed $\mathbf{V}$
[in] B: The transformation matrix $\mathbf{B}$
    @@ -240,7 +240,7 @@
    Parameters
[in] T: The tensor to be transformed $\mathbf{T}$
[in] B: The transformation matrix $\mathbf{B}$
    @@ -274,7 +274,7 @@
    Parameters
[in] T: The tensor to be transformed $\mathbf{T}$
[in] B: The transformation matrix $\mathbf{B}$
    @@ -307,7 +307,7 @@
    Parameters
[in] H: The tensor to be transformed $\mathbf{H}$
[in] B: The transformation matrix $\mathbf{B}$
    @@ -340,7 +340,7 @@
    Parameters
[in] H: The tensor to be transformed $\mathbf{H}$
[in] B: The transformation matrix $\mathbf{B}$
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html 2024-12-27 18:25:17.204931418 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html 2024-12-27 18:25:17.212931473 +0000 @@ -484,11 +484,11 @@
const Tensor< 2, dim, Number > & F

    Return the result of the pull back transformation on a rank-4 contravariant symmetric tensor, i.e. (in index notation):

\[
  \left[ \chi^{-1}\left(\bullet\right)^{\sharp} \right]_{IJKL}
    \dealcoloneq F^{-1}_{Ii} F^{-1}_{Jj}
    \left(\bullet\right)^{\sharp}_{ijkl} F^{-1}_{Kk} F^{-1}_{Ll}
\]

    Parameters
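A hedged round-trip sketch for the contravariant transformations documented above (function names as documented; the example deformation gradient is an assumption for illustration):

#include <deal.II/base/tensor.h>
#include <deal.II/physics/transformations.h>

// Hedged sketch: push a rank-2 contravariant tensor forward with a
// deformation gradient F and pull it back again; up to round-off the
// result equals the original tensor.
template <int dim>
dealii::Tensor<2, dim> contravariant_roundtrip(const dealii::Tensor<2, dim> &t,
                                               const dealii::Tensor<2, dim> &F)
{
  using namespace dealii::Physics::Transformations;
  const dealii::Tensor<2, dim> t_spatial = Contravariant::push_forward(t, F);
  return Contravariant::pull_back(t_spatial, F); // ~= t
}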
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html 2024-12-27 18:25:17.240931665 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html 2024-12-27 18:25:17.240931665 +0000 @@ -162,10 +162,10 @@
const Tensor< 2, dim, Number > & F

    Return the result of the push forward transformation on a covariant vector, i.e.

\[
  \chi\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{-T} \cdot \left(\bullet\right)^{\flat}
\]

    Parameters
    @@ -197,11 +197,11 @@
const Tensor< 2, dim, Number > & F

    Return the result of the push forward transformation on a rank-2 covariant tensor, i.e.

\[
  \chi\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{-T} \cdot \left(\bullet\right)^{\flat}
    \cdot \mathbf{F}^{-1}
\]

    Parameters
    @@ -233,11 +233,11 @@
const Tensor< 2, dim, Number > & F

    Return the result of the push forward transformation on a rank-2 covariant symmetric tensor, i.e.

\[
  \chi\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{-T} \cdot \left(\bullet\right)^{\flat}
    \cdot \mathbf{F}^{-1}
\]

    Parameters
    @@ -269,11 +269,11 @@
const Tensor< 2, dim, Number > & F

    Return the result of the push forward transformation on a rank-4 covariant tensor, i.e. (in index notation):

\[
  \left[ \chi\left(\bullet\right)^{\flat} \right]_{ijkl}
    \dealcoloneq F^{-T}_{iI} F^{-T}_{jJ}
    \left(\bullet\right)^{\flat}_{IJKL} F^{-T}_{kK} F^{-T}_{lL}
\]

    Parameters
    @@ -305,11 +305,11 @@
const Tensor< 2, dim, Number > & F

    Return the result of the push forward transformation on a rank-4 covariant symmetric tensor, i.e. (in index notation):

\[
  \left[ \chi\left(\bullet\right)^{\flat} \right]_{ijkl}
    \dealcoloneq F^{-T}_{iI} F^{-T}_{jJ}
    \left(\bullet\right)^{\flat}_{IJKL} F^{-T}_{kK} F^{-T}_{lL}
\]

    Parameters
    @@ -341,10 +341,10 @@
const Tensor< 2, dim, Number > & F

    Return the result of the pull back transformation on a covariant vector, i.e.

\[
  \chi^{-1}\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{T} \cdot \left(\bullet\right)^{\flat}
\]

    Parameters
    @@ -376,11 +376,11 @@
const Tensor< 2, dim, Number > & F

    Return the result of the pull back transformation on a rank-2 covariant tensor, i.e.

\[
  \chi^{-1}\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{T} \cdot \left(\bullet\right)^{\flat} \cdot
 \mathbf{F}
\]

    Parameters
    @@ -412,11 +412,11 @@
const Tensor< 2, dim, Number > & F

    Return the result of the pull back transformation on a rank-2 covariant symmetric tensor, i.e.

\[
  \chi^{-1}\left(\bullet\right)^{\flat}
    \dealcoloneq \mathbf{F}^{T} \cdot \left(\bullet\right)^{\flat}
    \cdot \mathbf{F}
\]

    Parameters
    @@ -448,11 +448,11 @@
const Tensor< 2, dim, Number > & F

Return the result of the pull back transformation on a rank-4 covariant tensor, i.e. (in index notation):

\[
  \left[ \chi^{-1}\left(\bullet\right)^{\flat} \right]_{IJKL}
  \dealcoloneq F^{T}_{Ii} F^{T}_{Jj}
  \left(\bullet\right)^{\flat}_{ijkl} F^{T}_{Kk} F^{T}_{Ll}
\]

    Parameters
    @@ -484,11 +484,11 @@
const Tensor< 2, dim, Number > & F

Return the result of the pull back transformation on a rank-4 covariant symmetric tensor, i.e. (in index notation):

\[
  \left[ \chi^{-1}\left(\bullet\right)^{\flat} \right]_{IJKL}
  \dealcoloneq F^{T}_{Ii} F^{T}_{Jj}
  \left(\bullet\right)^{\flat}_{ijkl} F^{T}_{Kk} F^{T}_{Ll}
\]

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html 2024-12-27 18:25:17.264931830 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html 2024-12-27 18:25:17.264931830 +0000 @@ -153,11 +153,11 @@
const Tensor< 2, dim, Number > & F

    Return the result of the push forward transformation on a contravariant vector, i.e.

\[
  \textrm{det} \mathbf{F}^{-1} \; \chi\left(\bullet\right)^{\sharp}
  \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; \mathbf{F} \cdot
  \left(\bullet\right)^{\sharp}
\]

    Parameters
    @@ -167,8 +167,8 @@
Returns
$\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left( \mathbf{V} \right)$
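A hedged sketch for the Piola variants documented here, which add the $1/\textrm{det}\mathbf{F}$ weighting to the plain contravariant transformation:

#include <deal.II/base/tensor.h>
#include <deal.II/physics/transformations.h>

// Hedged sketch: Piola push forward of a rank-2 contravariant tensor T
// with deformation gradient F, i.e. (1/det F) F . T . F^T per the
// formulas in this section.
template <int dim>
dealii::Tensor<2, dim> piola_push_forward(const dealii::Tensor<2, dim> &T,
                                          const dealii::Tensor<2, dim> &F)
{
  return dealii::Physics::Transformations::Piola::push_forward(T, F);
}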
    @@ -190,11 +190,11 @@
const Tensor< 2, dim, Number > & F

    Return the result of the push forward transformation on a rank-2 contravariant tensor, i.e.

\[
  \textrm{det} \mathbf{F}^{-1} \; \chi\left(\bullet\right)^{\sharp}
    \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; \mathbf{F} \cdot
 \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{T}
\]

    Parameters
    @@ -204,8 +204,8 @@
Returns
$\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left( \mathbf{T} \right)$
    @@ -227,11 +227,11 @@
const Tensor< 2, dim, Number > & F

    Return the result of the push forward transformation on a rank-2 contravariant symmetric tensor, i.e.

\[
  \textrm{det} \mathbf{F}^{-1} \; \chi\left(\bullet\right)^{\sharp}
    \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; \mathbf{F} \cdot
 \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{T}
\]

    Parameters
    @@ -241,8 +241,8 @@
Returns
$\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left( \mathbf{T} \right)$
    @@ -264,12 +264,12 @@
const Tensor< 2, dim, Number > & F

    Return the result of the push forward transformation on a rank-4 contravariant tensor, i.e. (in index notation):

\[
  \textrm{det} \mathbf{F}^{-1} \; \left[
 \chi\left(\bullet\right)^{\sharp} \right]_{ijkl}
    \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; F_{iI} F_{jJ}
 \left(\bullet\right)^{\sharp}_{IJKL} F_{kK} F_{lL}
\]

    Parameters
    @@ -279,8 +279,8 @@
Returns
$\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left( \mathbf{H} \right)$
    @@ -302,12 +302,12 @@
const Tensor< 2, dim, Number > & F

    Return the result of the push forward transformation on a rank-4 contravariant symmetric tensor, i.e. (in index notation):

\[
  \textrm{det} \mathbf{F}^{-1} \; \left[
 \chi\left(\bullet\right)^{\sharp} \right]_{ijkl}
    \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; F_{iI} F_{jJ}
 \left(\bullet\right)^{\sharp}_{IJKL} F_{kK} F_{lL}
\]

    Parameters
    @@ -317,8 +317,8 @@
Returns
$\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left( \mathbf{H} \right)$
    @@ -340,11 +340,11 @@
const Tensor< 2, dim, Number > & F

    Return the result of the pull back transformation on a contravariant vector, i.e.

\[
  \textrm{det} \mathbf{F} \; \chi^{-1}\left(\bullet\right)^{\sharp}
    \dealcoloneq \textrm{det} \mathbf{F} \; \mathbf{F}^{-1} \cdot
 \left(\bullet\right)^{\sharp}
\]

    Parameters
    @@ -354,8 +354,8 @@
Returns
$\textrm{det} \mathbf{F} \; \chi^{-1}\left( \mathbf{v} \right)$
    @@ -377,11 +377,11 @@
const Tensor< 2, dim, Number > & F

    Return the result of the pull back transformation on a rank-2 contravariant tensor, i.e.

\[
  \textrm{det} \mathbf{F} \; \chi^{-1}\left(\bullet\right)^{\sharp}
    \dealcoloneq \textrm{det} \mathbf{F} \; \mathbf{F}^{-1} \cdot
 \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{-T}
\]

    Parameters
    @@ -391,8 +391,8 @@
Returns
$\textrm{det} \mathbf{F} \; \chi^{-1}\left( \mathbf{t} \right)$
    @@ -414,11 +414,11 @@
const Tensor< 2, dim, Number > & F

    Return the result of the pull back transformation on a rank-2 contravariant symmetric tensor, i.e.

\[
  \textrm{det} \mathbf{F} \; \chi^{-1}\left(\bullet\right)^{\sharp}
    \dealcoloneq \textrm{det} \mathbf{F} \; \mathbf{F}^{-1} \cdot
 \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{-T}
\]

    Parameters
    @@ -428,8 +428,8 @@
Returns
$\textrm{det} \mathbf{F} \; \chi^{-1}\left( \mathbf{t} \right)$
/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html	2024-12-27 18:25:17.292932022 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html	2024-12-27 18:25:17.292932022 +0000
@@ -136,11 +136,11 @@
const Tensor< 1, spacedim, Number > & b

Calculate the angle $\theta$ between two vectors a and b. The returned angle will be in the range $[0, \pi]$.

    This function uses the geometric definition of the scalar product.

\[
  \vec{a} \cdot \vec{b} = \|\vec{a}\| \|\vec{b}\| \cos(\theta)
\]

    @@ -168,20 +168,20 @@
const Tensor< 1, spacedim, Number > & axis

    Calculate the angle $\theta$ between two vectors a and b, where both vectors are located in a plane described by a normal vector axis.

The angle computed by this function corresponds to the rotation angle that would transform the vector a into the vector b around the vector axis. Thus, contrary to the function above, we get a signed angle which will be in the range $[-\pi, \pi]$.

    The vector axis needs to be a unit vector and be perpendicular to both vectors a and b.

    This function uses the geometric definitions of both the scalar and cross product.

\begin{align*}
  \vec{a} \cdot  \vec{b} &= \|\vec{a}\| \|\vec{b}\| \cos(\theta) \\
  \vec{a} \times \vec{b} &= \|\vec{a}\| \|\vec{b}\| \sin(\theta) \vec{n}
\end{align*}

    We can create the tangent of the angle using both products.

\[
  \tan{\theta}
  = \frac{\sin(\theta)}{\cos(\theta)}
  = \frac{(\vec{a} \times \vec{b}) \cdot \vec{n}}{\vec{a} \cdot \vec{b}}
\]

    Note
    Only applicable for three-dimensional vectors spacedim == 3.
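A minimal sketch of this computation, assuming deal.II's cross_product_3d() and the scalar product of Tensor<1,3> objects (the function name is hypothetical, and axis is taken to be a unit vector perpendicular to both arguments, as required above):

#include <cmath>

template <typename Number>
Number signed_angle_sketch(const Tensor<1, 3, Number> &a,
                           const Tensor<1, 3, Number> &b,
                           const Tensor<1, 3, Number> &axis)
{
  const Number cos_part = a * b;                         // ~ |a||b| cos(theta)
  const Number sin_part = cross_product_3d(a, b) * axis; // ~ |a||b| sin(theta)
  return std::atan2(sin_part, cos_part);                 // signed, in [-pi, pi]
}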
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html 2024-12-27 18:25:17.316932187 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html 2024-12-27 18:25:17.320932214 +0000 @@ -123,13 +123,13 @@
class TransformationSpectrumFolding

    Detailed Description

Base namespace for solver classes using the SLEPc solvers, which are selected based on flags passed to the eigenvalue problem solver context. Derived classes set the flags appropriate to the solver they implement.

The SLEPc solvers are intended to be used for solving the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively. The emphasis is on methods and techniques appropriate for problems in which the associated matrices are sparse. Most of the methods offered by the SLEPc library are projection methods or other methods with similar properties; and wrappers are provided to interface to SLEPc solvers that handle both of these problem sets.

SLEPcWrappers can be used in application codes in the following way:

    SolverControl solver_control (1000, 1e-9);
    SolverArnoldi system (solver_control, mpi_communicator);
    system.solve (A, B, lambda, x, size_of_spectrum);
for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable const unsigned int size_of_spectrum tells SLEPc the number of eigenvector/eigenvalue pairs to solve for. Additional options and solver parameters can be passed to the SLEPc solvers before calling solve(). For example, if the matrices of the general eigenspectrum problem are not hermitian and only the lower eigenvalues are wanted, the following code can be implemented before calling solve():

    system.set_problem_type (EPS_NHEP);
    system.set_which_eigenpairs (EPS_SMALLEST_REAL);

    These options can also be set at the command line.

    See also step-36 for a hands-on example.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html 2024-12-27 18:25:17.340932352 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html 2024-12-27 18:25:17.344932379 +0000 @@ -192,7 +192,7 @@
    const VectorType &b,
    double tol)>

    Type of function objects to interface with SUNDIALS' linear solvers

This function type encapsulates the action of solving $P^{-1}Ax=P^{-1}b$. The LinearOperator op encapsulates the matrix vector product $Ax$ and the LinearOperator prec encapsulates the application of the preconditioner $P^{-1}z$. The user can specify function objects of this type to attach custom linear solver routines to SUNDIALS. The two LinearOperators op and prec are built internally by SUNDIALS based on user settings. The parameters are interpreted as follows:

Parameters
[in] op : A LinearOperator that applies the matrix vector product
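A sketch of such a function object (the surrounding solver object ode and the member name follow deal.II's SUNDIALS wrappers but should be treated as assumptions here; op and prec only need a GMRES-compatible vmult()):

ode.solve_linearized_system = [](auto &op, auto &prec, VectorType &x,
                                 const VectorType &b, double tol) {
  SolverControl           control(1000, tol);
  SolverGMRES<VectorType> gmres(control);
  // Iteratively solves P^{-1} A x = P^{-1} b as described above.
  gmres.solve(op, x, b, prec);
};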
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html 2024-12-27 18:25:17.372932571 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html 2024-12-27 18:25:17.376932599 +0000 @@ -122,19 +122,19 @@

    Detailed Description

    Smoothness estimation strategy based on the decay of Fourier expansion coefficients.

From the definition, we can write our Fourier series expansion $a_{\bf k}$ of the finite element solution on cell $K$ with polynomial degree $p$ as a matrix product

\begin{eqnarray*}
  u_h({\bf x}) &=& \sum_j u_j \varphi_j ({\bf x}) \\
  u_{h, {\bf k}}({\bf x}) &=&
    \sum_{{\bf k}, \|{\bf k}\|\le p} a_{\bf k} \phi_{\bf k}({\bf x}),
    \quad a_{\bf k} = \sum_j {\cal F}_{{\bf k},j} u_j
\end{eqnarray*}

with $u_j$ the degrees of freedom and $\varphi_j$ the corresponding shape functions. $\{\phi_{\bf k}({\bf x}) = \exp(i \, 2 \pi \, {\bf k} \cdot {\bf x}) \}$ are exponential functions on cell $K$. $a_{\bf k}$ and ${\cal F}_{{\bf k},j}$ are coefficients and transformation matrices from the Fourier expansion of each shape function. For practical reasons, we will perform the calculation of these matrices and coefficients only on the reference cell $\hat K$. We only have to calculate the transformation matrices once this way. However, results are only applicable if the mapping from the reference cell to the actual cell is linear. We use the class FESeries::Fourier to determine all coefficients $a_{\bf k}$.

If the finite element approximation on cell $K$ is part of the Hilbert space $H^s(K)$, then the following integral must exist for both the finite element and spectral representation of our solution

\begin{eqnarray*}
  \| \nabla^s u_h({\bf x}) \|_{L^2(K)}^2 &=&
    \int\limits_K \left| \nabla^s u_h({\bf x}) \right|^2 d{\bf x} <
    \infty \\
  \| \nabla^s u_{h, {\bf k}}({\bf x}) \|_{L^2(K)}^2 &=&
    \int\limits_K \left| \sum\limits_{\bf k} (-i \, 2 \pi \, {\bf k})^s \,
    a_{\bf k} \, \phi_{\bf k}({\bf x}) \right|^2 d{\bf x} =
    (2 \pi)^{2s} \sum\limits_{\bf k} \left| a_{\bf k} \right|^2
    \|{\bf k}\|_2^{2s} < \infty
\end{eqnarray*}

    The sum is finite only if the summands decay at least with order

\[
  |a_{\bf k}|^2 \|{\bf k}\|_2^{2s} \|{\bf k}\|_2^{d - 1} =
    {\cal O}\left( \|{\bf k}\|_2^{-1-\epsilon} \right)
\]

for all $\epsilon > 0$. The additional factor stems from the fact that, since we sum over all multi-indices ${\bf k}$ that are located on a dim-dimensional sphere, we actually have, up to a constant, $\|{\bf k}\|_2^{d-1}$ modes located in each increment $\|{\bf k}\|_2 + d\|{\bf k}\|_2$ that need to be taken into account. By a comparison of exponents, we can rewrite this condition as

\[
  |a_{\bf k}| = {\cal O}\left(\|{\bf k}\|_2^{-\left(s + \frac d2 + \epsilon \right)} \right)
\]

The next step is to estimate how fast these coefficients decay with $\|{\bf k}\|_2$. Thus, we perform a least-squares fit

\[
  \min_{\alpha,\sigma}
  \frac 12 \sum_{{\bf k}, \|{\bf k}\|_2 \le p}
  \left( |a_{\bf k}| - \alpha \|{\bf k}\|_2^{-\sigma}\right)^2
\]

with regression coefficients $\alpha$ and $\sigma$. For simplification, we apply a logarithm on our minimization problem

\[
  \min_{\beta,\sigma}
  Q(\beta,\sigma) =
  \frac 12 \sum_{{\bf k}, \|{\bf k}\|_2 \le p}
  \left( \ln |a_{\bf k}| - \beta + \sigma \ln \|{\bf k}\|_2 \right)^2,
\]

where $\beta=\ln \alpha$. This is now a problem for which the optimality conditions $\frac{\partial Q}{\partial\beta}=0, \frac{\partial Q}{\partial\sigma}=0$, are linear in $\beta,\sigma$. We can write these conditions as follows:

\[
  \left(\begin{array}{cc}
  \sum_{{\bf k}, \|{\bf k}\|_2 \le p} 1 &
  \sum_{{\bf k}, \|{\bf k}\|_2 \le p} \ln \|{\bf k}\|_2
  \\
  \sum_{{\bf k}, \|{\bf k}\|_2 \le p} \ln \|{\bf k}\|_2 &
  \sum_{{\bf k}, \|{\bf k}\|_2 \le p} (\ln \|{\bf k}\|_2)^2
  \end{array}\right)
  \left(\begin{array}{c}
  \beta \\ -\sigma
  \end{array}\right)
  =
  \left(\begin{array}{c}
  \sum_{{\bf k}, \|{\bf k}\|_2\le p} \ln |a_{{\bf k}}|
  \\
  \sum_{{\bf k}, \|{\bf k}\|_2\le p} \ln |a_{{\bf k}}| \ln \|{\bf k}\|_2
  \end{array}\right)
\]

Solving for $\beta$ and $\sigma$ is just a linear regression fit and to do that we will use FESeries::linear_regression().

While we are not particularly interested in the actual value of $\beta$, the formula above gives us a means to calculate the value of the exponent $\sigma$ that we can then use to determine that $u(\hat{\bf x})$ is in $H^s(K)$ with $s=\sigma-\frac d2$. The decay rates $\sigma$ will suffice as our smoothness indicators and will be calculated on each cell for any provided solution.

    Note
    An extensive demonstration of the use of these functions is provided in step-27.
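The core of this fit is ordinary linear regression on the pairs $(\ln \|{\bf k}\|_2, \ln |a_{\bf k}|)$; inside deal.II this role is played by FESeries::linear_regression(). A self-contained sketch of the same computation (function name hypothetical):

#include <cmath>
#include <cstddef>
#include <vector>

// Fit ln|a_k| ~ beta - sigma * ln||k||_2 and return the decay rate sigma.
double estimate_decay_rate(const std::vector<double> &k_norms,
                           const std::vector<double> &abs_coefficients)
{
  double n = 0, sx = 0, sy = 0, sxx = 0, sxy = 0;
  for (std::size_t i = 0; i < k_norms.size(); ++i)
    {
      const double x = std::log(k_norms[i]);          // ln ||k||_2
      const double y = std::log(abs_coefficients[i]); // ln |a_k|
      n += 1.0; sx += x; sy += y; sxx += x * x; sxy += x * y;
    }
  // Slope of the least-squares line, i.e. the solution of the 2x2
  // normal equations written out above; sigma is its negative.
  return -(n * sxy - sx * sy) / (n * sxx - sx * sx);
}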

    Function Documentation

    @@ -241,17 +241,17 @@
const bool only_flagged_cells = false

In this variant of the estimation strategy for higher dimensions, we will consider all mode vectors $\bf k$ describing Fourier polynomials $P_{\bf k}$ and perform one least-squares fit over all coefficients at once. If there are multiple coefficients corresponding to the same absolute value of modes $\|\bf k\|_2$, we take the maximum among those. Thus, the least-squares fit is performed on

\[
  \ln \left( \max\limits_{\|{\bf k}\|_2} |a_{\bf k}| \right) \sim
    C - \sigma \ln \|{\bf k}\|_2
\]

for ${\bf k}=(k_1,\ldots,k_d)$ and $k_i=0,\ldots,p$, with $p$ the polynomial degree of the finite element. We exclude the $\|{\bf k}\|_2=0$ modes to avoid the singularity of the logarithm.

The regression_strategy parameter determines which norm will be used on the subset of coefficients $\bf k$ with the same absolute value $\|{\bf k}\|_2$. Default is VectorTools::Linfty_norm for a maximum approximation.

For a provided solution vector solution defined on a DoFHandler dof_handler, this function returns a vector smoothness_indicators with as many elements as there are cells where each element contains the estimated regularity $\sigma$.

    A series expansion object fe_fourier has to be supplied, which needs to be constructed with the same FECollection object as the dof_handler.

The parameter smallest_abs_coefficient allows ignoring small (in absolute value) coefficients within the linear regression fit. In case there are fewer than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.

    Smoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. The parameter only_flagged_cells controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to a signaling NaN.

    Definition at line 368 of file smoothness_estimator.cc.

    @@ -300,11 +300,11 @@
const bool only_flagged_cells = false

In this variant of the estimation strategy for higher dimensions, we only consider modes in each coordinate direction, i.e., only mode vectors $\bf k$ with one nonzero entry. We perform the least-squares fit in each coordinate direction separately and take the lowest decay rate $\sigma$ among them.

The coefficients_predicate parameter selects Fourier coefficients $a_j$, $j=0,\ldots,p$ for linear regression in each coordinate direction. The user is responsible for updating the vector of flags provided to this function. Note that its size is $p+1$, where $p$ is the polynomial degree of the FE basis on a given element. The default implementation will use all Fourier coefficients in each coordinate direction, i.e., set all the elements of the vector to true.

For a provided solution vector solution defined on a DoFHandler dof_handler, this function returns a vector smoothness_indicators with as many elements as there are cells where each element contains the estimated regularity $\sigma$.

    A series expansion object fe_fourier has to be supplied, which needs to be constructed with the same FECollection object as the dof_handler.

The parameter smallest_abs_coefficient allows ignoring small (in absolute value) coefficients within the linear regression fit. In case there are fewer than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.

    Smoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. The parameter only_flagged_cells controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to a signaling NaN.

    Definition at line 466 of file smoothness_estimator.cc.

    @@ -332,7 +332,7 @@

    Returns a FESeries::Fourier object for Fourier series expansions with the default configuration for smoothness estimation purposes.

For each finite element of the provided fe_collection, we use as many modes as its polynomial degree plus two. Further, for each element, we use a 5-point Gaussian quadrature iterated in each dimension by the maximal wave number, which is the number of modes decreased by one, since we start with $k = 0$.

    As the Fourier expansion can only be performed on scalar fields, this class does not operate on vector-valued finite elements and will therefore throw an assertion. However, each component of a finite element field can be treated as a scalar field, respectively, on which Fourier expansions are again possible. For this purpose, the optional parameter component defines which component of each FiniteElement will be used. The default value of component only applies to scalar FEs, in which case it indicates that the sole component is to be decomposed. For vector-valued FEs, a non-default value must be explicitly provided.

    Definition at line 575 of file smoothness_estimator.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html 2024-12-27 18:25:17.404932791 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html 2024-12-27 18:25:17.404932791 +0000 @@ -122,25 +122,25 @@

    Detailed Description

    Smoothness estimation strategy based on the decay of Legendre expansion coefficients.

In one dimension, the finite element solution on cell $K$ with polynomial degree $p$ can be written as

\begin{eqnarray*}
  u_h(x) &=& \sum_j u_j \varphi_j (x) \\
  u_{h, k}(x) &=& \sum_{k=0}^{p} a_k \widetilde P_k (x),
  \quad a_k = \sum_j {\cal L}_{k,j} u_j
\end{eqnarray*}

where $u_j$ are degrees of freedom and $\varphi_j$ are the corresponding shape functions. $\{\widetilde P_k(x)\}$ are Legendre polynomials on cell $K$. $a_k$ and ${\cal L}_{k,j}$ are coefficients and transformation matrices from the Legendre expansion of each shape function. For practical reasons, we will perform the calculation of these matrices and coefficients only on the reference cell $\hat K$. We only have to calculate the transformation matrices once this way. However, results are only applicable if the mapping from the reference cell to the actual cell is affine. We use the class FESeries::Legendre to determine all coefficients $a_k$.

A function is analytic, i.e., representable by a power series, if and only if its Legendre expansion coefficients decay as (see [eibner2007hp])

\[
  |a_k| \sim c \, \exp(-\sigma k)
\]

We determine their decay rate $\sigma$ by performing the linear regression fit of

\[
  \ln |a_k| \sim C - \sigma k
\]

for $k=0,\ldots,p$, with $p$ the polynomial degree of the finite element. The rate of the decay $\sigma$ can be used to estimate the smoothness. For example, one strategy to implement hp-refinement criteria is to perform p-refinement if $\sigma>1$ (see [mavriplis1994hp]).

    Function Documentation

    ◆ coefficient_decay()

    @@ -184,24 +184,24 @@
const bool only_flagged_cells = false

In this variant of the estimation strategy for higher dimensions, we will consider all mode vectors $\bf k$ describing Legendre polynomials $\widetilde P_{\bf k}$ and perform one least-squares fit over all coefficients at once. If there are multiple coefficients corresponding to the same absolute value of modes $\|{\bf k}\|_1$, we take the maximum among those. Thus, the least-squares fit is performed on

\begin{eqnarray*}
  \widetilde P_{\bf k}({\bf x}) &=&
    \widetilde P_{k_1} (x_1) \ldots \widetilde P_{k_d} (x_d) \\
  \ln \left( \max\limits_{\|{\bf k}\|_1} |a_{\bf k}| \right) &\sim&
    C - \sigma \|{\bf k}\|_1
\end{eqnarray*}

for ${\bf k}=(k_1,\ldots,k_d)$ and $k_i=0,\ldots,p$, with $p$ the polynomial degree of the finite element.

    For a finite element approximation solution, this function writes the decay rate for every cell into the output vector smoothness_indicators.

    Parameters
[in] fe_legendre : FESeries::Legendre object to calculate coefficients. This object needs to be initialized to have at least $p+1$ coefficients in each direction for every finite element in the collection, where $p$ is its polynomial degree.
[in] dof_handler : A DoFHandler.
[in] solution : A solution vector.
[out] smoothness_indicators : A vector for smoothness indicators.
[in] regression_strategy : Determines which norm will be used on the subset of coefficients $\mathbf{k}$ with the same absolute value $\|{\bf k}\|_1$. Default is VectorTools::Linfty_norm for a maximum approximation.
[in] smallest_abs_coefficient : The smallest absolute value of the coefficient to be used in linear regression. Note that Legendre coefficients of some functions may have a repeating pattern of zero coefficients (i.e. for functions that are locally symmetric or antisymmetric about the midpoint of the element in any coordinate direction). Thus this parameter allows ignoring small (in absolute value) coefficients within the linear regression fit. In case there are fewer than two nonzero coefficients, the returned value for this cell will be $\sigma=\infty$.
[in] only_flagged_cells : Smoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. This parameter controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to a signaling NaN.
    @@ -254,16 +254,16 @@
const bool only_flagged_cells = false

In this variant of the estimation strategy for higher dimensions, we only consider modes in each coordinate direction, i.e., only mode vectors $\bf k$ with one nonzero entry. We perform the least-squares fit in each coordinate direction separately and take the lowest decay rate $\sigma$ among them.

    For a finite element approximation solution, this function writes the decay rate for every cell into the output vector smoothness_indicators.

    Parameters
[in] fe_legendre : FESeries::Legendre object to calculate coefficients. This object needs to be initialized to have at least $p+1$ coefficients in each direction, where $p$ is the maximum polynomial degree to be used.
[in] dof_handler : A DoFHandler
[in] solution : A solution vector
[out] smoothness_indicators : A vector for smoothness indicators
[in] coefficients_predicate : A predicate to select Legendre coefficients $a_j$, $j=0,\ldots,p$ for linear regression in each coordinate direction. The user is responsible for updating the vector of flags provided to this function. Note that its size is $p+1$, where $p$ is the polynomial degree of the FE basis on a given element. The default implementation will use all Legendre coefficients in each coordinate direction, i.e. set all elements of the vector to true.
[in] smallest_abs_coefficient : The smallest absolute value of the coefficient to be used in linear regression in each coordinate direction. Note that Legendre coefficients of some functions may have a repeating pattern of zero coefficients (i.e. for functions that are locally symmetric or antisymmetric about the midpoint of the element in any coordinate direction). Thus this parameter allows ignoring small (in absolute value) coefficients within the linear regression fit. In case there are fewer than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.
[in] only_flagged_cells : Smoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. This parameter controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to NaN.
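A typical call sequence, with placeholder variable names (default_fe_series() is the default-configuration helper described for the Fourier variant above, which also exists for Legendre expansions):

auto fe_legendre =
  SmoothnessEstimator::Legendre::default_fe_series(fe_collection);
Vector<float> smoothness_indicators(triangulation.n_active_cells());
SmoothnessEstimator::Legendre::coefficient_decay(fe_legendre,
                                                 dof_handler,
                                                 solution,
                                                 smoothness_indicators);
// Large decay rates sigma indicate smooth cells, e.g. candidates for
// p-refinement when sigma > 1 as mentioned above.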
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html 2024-12-27 18:25:17.424932928 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html 2024-12-27 18:25:17.428932956 +0000 @@ -152,18 +152,18 @@
SparsityPatternType2 & sparsity_pattern_out

    Given a sparse matrix (system_matrix, sparsity_pattern), construct a new sparse matrix (system_matrix_out, sparsity_pattern_out) by restriction

\[
  A_i = R_i A R_i^T,
\]

where the Boolean matrix $R_i$ is defined by the entries of requested_is.

The function can be called by multiple processes with different sets of indices, allowing to assign each process a different $A_i$.

    Such a function is useful to implement Schwarz methods, where operations of type

\[
  u^{n} = u^{n-1} + \sum_{i} R_i^T A_i^{-1} R_i (f - A u^{n-1})
\]

are performed to iteratively solve a system of type $Au=f$.

    Warning
    This is a collective call that needs to be executed by all processes in the communicator of sparse_matrix_in.
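A schematic of one such update step; everything here is hypothetical glue code (Restrictor is an invented helper bundling $R_i$, its transpose, and a factorization of $A_i$), intended only to mirror the formula above:

#include <vector>

template <typename Matrix, typename Vector, typename Restrictor>
void schwarz_step(const Matrix &A, const Vector &f, Vector &u,
                  const std::vector<Restrictor> &blocks)
{
  Vector residual(f);
  Vector Au(u);
  A.vmult(Au, u);   // Au = A u^{n-1}
  residual -= Au;   // r  = f - A u^{n-1}
  for (const auto &block : blocks)
    {
      const auto r_i = block.gather(residual);  // R_i r
      const auto d_i = block.local_solve(r_i);  // A_i^{-1} R_i r
      block.scatter_add(u, d_i);                // u += R_i^T d_i
    }
}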
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html 2024-12-27 18:25:17.456933148 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html 2024-12-27 18:25:17.456933148 +0000 @@ -191,7 +191,7 @@
    Note
    This function returns an internal class object consisting of an array subscript operator operator[](unsigned int) and an alias value_type describing its return value.
    Template Parameters
index : The index to be shifted to the end. Indices are counted from 0, thus the valid range is $0\le\text{index}<\text{rank}$.
rank : Rank of the tensorial object t
T : A tensorial object of rank rank. T must provide a local alias value_type and an index operator operator[]() that returns a (const or non-const) reference of value_type.
    @@ -275,12 +275,12 @@

    This function contracts two tensorial objects left and right and stores the result in result. The contraction is done over the last no_contr indices of both tensorial objects:

\[
  \text{result}_{i_1,..,i_{r1},j_1,..,j_{r2}}
  = \sum_{k_1,..,k_{\mathrm{no\_contr}}}
    \mathrm{left}_{i_1,..,i_{r1},k_1,..,k_{\mathrm{no\_contr}}}
    \mathrm{right}_{j_1,..,j_{r2},k_1,..,k_{\mathrm{no\_contr}}}
\]

Calling this function is equivalent to writing the following low level code:

    for(unsigned int i_0 = 0; i_0 < dim; ++i_0)
    ...
    @@ -335,12 +335,12 @@

    Full contraction of three tensorial objects:

\[
  \sum_{i_1,..,i_{r1},j_1,..,j_{r2}}
  \text{left}_{i_1,..,i_{r1}}
  \text{middle}_{i_1,..,i_{r1},j_1,..,j_{r2}}
  \text{right}_{j_1,..,j_{r2}}
\]

Calling this function is equivalent to writing the following low level code:

    T1 result = T1();
    for(unsigned int i_0 = 0; i_0 < dim; ++i_0)
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities.html 2024-12-27 18:25:17.508933505 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities.html 2024-12-27 18:25:17.512933533 +0000 @@ -1034,13 +1034,13 @@

    Calculate a fixed power, provided as a template argument, of a number.

This function provides an efficient way to calculate things like $t^N$ where N is a known number at compile time. The function computes the power of $t$ via the "recursive doubling" approach in which, for example, $t^7$ is computed as

    \begin{align*}
   t^7 = (tttt)(tt)(t)
 \end{align*}

    where computing $tt$ requires one product, computing $tttt$ is achieved by multiplying the previously computed $tt$ by itself (requiring another multiplication), and then the product is computed via two more multiplications for a total of 4 multiplications instead of the naively necessary 6.

The major savings this function generates result, however, from the fact that it exploits that we have an integer power of the argument $t$. The alternative to computing such powers, std::pow(t,7), uses the std::pow function that takes the exponent as a floating point number and, because it has to cater to the complexities of the general situation, is vastly slower.

    Use this function as in fixed_power<dim> (t) or fixed_power<7> (t).

    Definition at line 942 of file utilities.h.
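A sketch of the recursive doubling scheme itself (not deal.II's actual implementation, which lives in utilities.h):

template <int N, typename T>
constexpr T fixed_power_sketch(const T t)
{
  static_assert(N >= 0, "negative exponents not handled in this sketch");
  if constexpr (N == 0)
    return T(1);
  else if constexpr (N == 1)
    return t;
  else if constexpr (N % 2 == 1)
    return t * fixed_power_sketch<N - 1>(t); // peel off one factor
  else
    {
      const T half = fixed_power_sketch<N / 2>(t);
      return half * half; // square the previously computed power
    }
}
// fixed_power_sketch<7>(t) performs exactly the 4 multiplications counted above.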

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 2024-12-27 18:25:17.544933752 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 2024-12-27 18:25:17.548933780 +0000 @@ -147,7 +147,7 @@

    Return the elements of a continuous Givens rotation matrix and the norm of the input vector.

That is, for a given pair x and y, return $c$, $s$ and $\sqrt{x^2+y^2}$ such that

\[
\begin{bmatrix}
c  & s \\
-s & c
\end{bmatrix}
\begin{bmatrix}
x \\ y
\end{bmatrix}
=
\begin{bmatrix}
\sqrt{x^2+y^2} \\ 0
\end{bmatrix}
\]
@@ -189,7 +189,7 @@

    Return the elements of a hyperbolic rotation matrix.

That is, for a given pair x and y, return $c$, $s$ and $r$ such that

\[
\begin{bmatrix}
c  & -s \\
-s & c
\end{bmatrix}
\begin{bmatrix}
x \\ y
\end{bmatrix}
=
\begin{bmatrix}
r \\ 0
\end{bmatrix}
\]
@@ -289,8 +289,8 @@

VectorMemory< VectorType > & vector_memory

Apply Chebyshev polynomial of the operator H to x. For a non-defective operator $H$ with a complete set of eigenpairs $H \psi_i = \lambda_i \psi_i$, the action of a polynomial filter $p$ is given by $p(H)x =\sum_i a_i p(\lambda_i) \psi_i$, where $x=: \sum_i a_i \psi_i$. Thus by appropriately choosing the polynomial filter, one can alter the eigenmodes contained in $x$.

    This function uses Chebyshev polynomials of first kind. Below is an example of polynomial $T_n(x)$ of degree $n=8$ normalized to unity at $-1.2$.

    @@ -298,8 +298,8 @@
By introducing a linear mapping $L$ from unwanted_spectrum to $[-1,1]$, we can damp the corresponding modes in x. The higher the polynomial degree $n$, the more rapidly it grows outside of $[-1,1]$. In order to avoid numerical overflow, we normalize the polynomial filter to unity at tau. Thus, the filtered operator is $p(H) = T_n(L(H))/T_n(L(\tau))$.

The action of the Chebyshev filter only requires evaluation of vmult() of H and is based on the recursion equation for Chebyshev polynomial of degree $n$: $T_{n}(x) = 2x T_{n-1}(x) - T_{n-2}(x)$ with $T_0(x)=1$ and $T_1(x)=x$.

    vector_memory is used to allocate memory for temporary objects.

    This function implements the algorithm (with a minor fix of sign of $\sigma_1$) from [Zhou2014].

    Note
    If tau is equal to std::numeric_limits<double>::infinity(), no normalization will be performed.
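The three-term recursion at the heart of the filter, written out for scalars (the operator version replaces multiplication by x with a vmult() by L(H); function name hypothetical):

double chebyshev_T(const unsigned int n, const double x)
{
  if (n == 0)
    return 1.0;        // T_0(x) = 1
  double T_prev = 1.0; // T_{k-2}
  double T_curr = x;   // T_{k-1}, starting from T_1(x) = x
  for (unsigned int k = 2; k <= n; ++k)
    {
      const double T_next = 2.0 * x * T_curr - T_prev; // T_k = 2x T_{k-1} - T_{k-2}
      T_prev = T_curr;
      T_curr = T_next;
    }
  return T_curr;
}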
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI.html 2024-12-27 18:25:17.596934109 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI.html 2024-12-27 18:25:17.596934109 +0000 @@ -1313,7 +1313,7 @@
const MPI_Comm comm

For each process $p$ on a communicator with $P$ processes, compute both the (exclusive) partial sum $\sum_{i=0}^{p-1} v_i$ and the total sum $\sum_{i=0}^{P-1} v_i$, and return these two values as a pair. The former is computed via the MPI_Exscan function where the partial sum is typically called "(exclusive) scan" of the values $v_p$ provided by the individual processes. The term "prefix sum" is also used.

    This function is only available if T is a type natively supported by MPI.
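In raw MPI the pair corresponds to one MPI_Exscan plus one MPI_Allreduce. A sketch for T = int (note that MPI_Exscan leaves the receive buffer untouched on rank 0, hence the zero initialization):

int v = /* this process's value */ 0;
int partial = 0, total = 0;
MPI_Exscan(&v, &partial, 1, MPI_INT, MPI_SUM, comm);  // sum over ranks < p
MPI_Allreduce(&v, &total, 1, MPI_INT, MPI_SUM, comm); // sum over all ranks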

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html 2024-12-27 18:25:17.632934357 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html 2024-12-27 18:25:17.636934384 +0000 @@ -140,7 +140,7 @@
std::vector< unsigned int > selector (const std::vector< unsigned int > &targets, const std::function< RequestType(const unsigned int)> &create_request, const std::function< void(const unsigned int, const RequestType &)> &process_request, const MPI_Comm comm)

    Detailed Description

    A namespace for algorithms that implement the task of communicating in a dynamic-sparse way. In computer science, this is often called a consensus problem.

The problem consensus algorithms are trying to solve is this: Let's say you have $P$ processes that work together via MPI. Each (or at least some) of these want to send information to some of the other processes, or request information from other processes. No process knows which other process wants to communicate with them. The challenge is to determine who needs to talk to whom and what information needs to be sent, and to come up with an algorithm that ensures that this communication happens.

    That this is not a trivial problem can be seen by an analogy of the postal service. There, some senders may request information from some other participants in the postal service. So they send a letter that requests the information, but the recipients do not know how many such letters they need to expect (or that they should expect any at all). They also do not know how long they need to keep checking their mailbox for incoming requests. The recipients can be considered reliable, however: We can assume that everyone who is sent a request puts a letter with the answer in the mail. This time at least the recipients of these answers know that they are waiting for these answers because they have previously sent a request. They do not know in advance, however, when the answer will arrive and how long to wait. The goal of a consensus algorithm is then to come up with a strategy in which every participant can say who they want to send requests to, what that request is, and is then guaranteed an answer. The algorithm will only return when all requests by all participants have been answered and the answer delivered to the requesters.

    The problem is generally posed in terms of requests and answers. In practice, either of these two may be empty messages. For example, processes may simply want to send information to others that they know these others need; in this case, the "answer" message may be empty and its meaning is simply an affirmation that the information was received. Similarly, in some cases processes simply need to inform others that they want information, but the destination process knows what information is being requested (based on where in the program the request happens) and can send that information without there be any identifying information in the request; in that case, the request message may be empty and simply serve to identify the requester. (Each message can be queried for its sender.)

    As mentioned in the first paragraph, the algorithms we are interested in are "dynamic-sparse":

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html 2024-12-27 18:25:17.756935208 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html 2024-12-27 18:25:17.760935235 +0000 @@ -343,7 +343,7 @@

•

Projection: compute the L2-projection of the given function onto the finite element space, i.e. if f is the function to be projected, compute fh in Vh such that (fh,vh)=(f,vh) for all discrete test functions vh. This is done through the solution of the linear system of equations M v = f where M is the mass matrix $m_{ij} = \int_\Omega \phi_i(x) \phi_j(x) dx$ and $f_i = \int_\Omega f(x) \phi_i(x) dx$. The solution vector $v$ then is the nodal representation of the projection fh. The project() functions are used in the step-21 and step-23 tutorial programs.

In order to get proper results, it may be necessary to treat boundary conditions correctly. Below are listed some cases where this may be needed. If needed, this is done by L2-projection of the trace of the given function onto the finite element space restricted to the boundary of the domain, then taking this information and using it to eliminate the boundary nodes from the mass matrix of the whole domain, using the MatrixTools::apply_boundary_values() function. The projection of the trace of the function to the boundary is done with the VectorTools::project_boundary_values() (see below) function, which is called with a map of boundary functions std::map<types::boundary_id, const Function<spacedim,number>*> in which all boundary indicators from zero to numbers::internal_face_boundary_id-1 (numbers::internal_face_boundary_id is used for other purposes, see the Triangulation class documentation) point to the function to be projected. The projection to the boundary takes place using a second quadrature formula on the boundary given to the project() function. The first quadrature formula is used to compute the right hand side and for numerical quadrature of the mass matrix.

      The projection of the boundary values first, then eliminating them from the global system of equations is not needed usually. It may be necessary if you want to enforce special restrictions on the boundary values of the projected function, for example in time dependent problems: you may want to project the initial values but need consistency with the boundary values for later times. Since the latter are projected onto the boundary in each time step, it is necessary that we also project the boundary values of the initial values, before projecting them to the whole domain.

      Obviously, the results of the two schemes for projection are different. Usually, when projecting to the boundary first, the L2-norm of the difference between original function and projection over the whole domain will be larger (factors of five have been observed) while the L2-norm of the error integrated over the boundary should of course be less. The reverse should also hold if no projection to the boundary is performed.
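A minimal call for the plain (no special boundary treatment) case, with placeholder variable names:

VectorTools::project(dof_handler,
                     constraints,          // AffineConstraints<double>
                     QGauss<dim>(fe.degree + 1),
                     f,                    // the Function<dim> to project
                     projected_solution);  // output vector, e.g. Vector<double>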

      @@ -406,220 +406,220 @@
Denote which norm/integral is to be computed by the integrate_difference() function on each cell and compute_global_error() for the whole domain. Let $f:\Omega \rightarrow \mathbb{R}^c$ be a finite element function with $c$ components where component $c$ is denoted by $f_c$ and $\hat{f}$ be the reference function (the fe_function and exact_solution arguments to integrate_difference()). Let $e_c = \hat{f}_c - f_c$ be the difference or error between the two. Further, let $w:\Omega \rightarrow \mathbb{R}^c$ be the weight function of integrate_difference(), which is assumed to be equal to one if not supplied. Finally, let $p$ be the exponent argument (for $L_p$-norms).

In the following, we denote by $E_K$ the local error computed by integrate_difference() on cell $K$, whereas $E$ is the global error computed by compute_global_error(). Note that integrals are approximated by quadrature in the usual way:

\[
  \int_A f(x) dx \approx \sum_q f(x_q) \omega_q.
\]

    Similarly for suprema over a cell $T$:

\[
  \sup_{x\in T} |f(x)| \approx \max_q |f(x_q)|.
\]

Enumerator

mean

    The function or difference of functions is integrated on each cell $K$:

\[
  E_K
  = \int_K \sum_c (\hat{f}_c - f_c) \, w_c
  = \int_K \sum_c e_c \, w_c
\]

    and summed up to get

\[
  E = \sum_K E_K
    = \int_\Omega \sum_c (\hat{f}_c - f_c) \, w_c
\]


    or, for $w \equiv 1$:

    \[
      E = \int_\Omega (\hat{f} - f)
        = \int_\Omega e.
    \]


    Note: This differs from what is typically known as the mean of a function by a factor of $\frac{1}{|\Omega|}$. To compute the mean you can also use compute_mean_value(). Finally, pay attention to the sign: if $\hat{f}=0$, this will compute the negative of the mean of $f$.

    L1_norm 

    The absolute value of the function is integrated:

    \[
      E_K = \int_K \sum_c |e_c| \, w_c
    \]

    and

    \[
      E = \sum_K E_K = \int_\Omega \sum_c |e_c| w_c,
    \]


    or, for $w \equiv 1$:

    \[
      E = \| e \|_{L^1}.
    \]

    L2_norm 

    The square of the function is integrated and the square root of the result is computed on each cell:

    \[
      E_K = \sqrt{ \int_K \sum_c e_c^2 \, w_c }
    \]

    and

    \[
      E = \sqrt{\sum_K E_K^2} = \sqrt{ \int_\Omega \sum_c e_c^2 \, w_c }
    \]


    or, for $w \equiv 1$:

    \[
      E = \sqrt{ \int_\Omega e^2 }
        = \| e \|_{L^2}
    \]

    Lp_norm 

    The absolute value to the $p$-th power is integrated and the $p$-th root is computed on each cell. The exponent $p$ is the exponent argument of integrate_difference() and compute_mean_value():

    \[
      E_K = \left( \int_K \sum_c |e_c|^p \, w_c \right)^{1/p}
    \]

    and

    \[
      E = \left( \sum_K E_K^p \right)^{1/p}
    \]


    or, for $w \equiv 1$:

    \[
      E = \| e \|_{L^p}.
    \]

    Linfty_norm 

    The maximum absolute value of the function:

    \[
      E_K = \sup_K \max_c |e_c| \, w_c
    \]

    and

    \[
      E = \max_K E_K
        = \sup_\Omega \max_c |e_c| \, w_c
    \]


    or, for $w \equiv 1$:

    \[
      E = \sup_\Omega \|e\|_\infty = \| e \|_{L^\infty}.
    \]

    H1_seminorm 

    L2_norm of the gradient:

    \[
      E_K = \sqrt{ \int_K \sum_c (\nabla e_c)^2 \, w_c }
    \]

    and

    \[
      E = \sqrt{\sum_K E_K^2} = \sqrt{ \int_\Omega \sum_c (\nabla e_c)^2 \,
      w_c }
    \]


    or, for $w \equiv 1$:

    \[
      E = \| \nabla e \|_{L^2}.
    \]

    Hdiv_seminorm 

    L2_norm of the divergence of a vector field. The function $f$ is expected to have $c \geq \text{dim}$ components and the first dim will be used to compute the divergence:

    \[
      E_K = \sqrt{ \int_K \left( \sum_c \frac{\partial e_c}{\partial x_c} \,
      \sqrt{w_c} \right)^2 }
    \]

    and

    \[
      E = \sqrt{\sum_K E_K^2}
        = \sqrt{ \int_\Omega \left( \sum_c \frac{\partial e_c}{\partial x_c}
      \, \sqrt{w_c} \right)^2  }
    \]


    or, for $w \equiv 1$:

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html 2024-12-27 18:25:17.800935510 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html 2024-12-27 18:25:17.808935565 +0000 @@ -541,7 +541,7 @@


    Predict how the current error_indicators will adapt after refinement and coarsening were to happen on the provided dof_handler, and write its results to predicted_errors. Each entry of error_indicators and predicted_errors corresponds to an active cell on the underlying Triangulation, thus each container has to be of size Triangulation::n_active_cells(). The errors are interpreted to be measured in the energy norm; this assumption enters the rate of convergence that is used in the prediction. The $l_2$-norm of the output argument predicted_errors corresponds to the predicted global error after adaptation.

    For p-adaptation, the local error is expected to converge exponentially with the polynomial degree of the assigned finite element. Each increase or decrease of the degree will thus change its value by a user-defined control parameter gamma_p.

    For h-adaptation, we expect the local error $\eta_K$ on cell $K$ to be proportional to $(h_K)^{p_K}$ in the energy norm, where $h_K$ denotes the cell diameter and $p_K$ the polynomial degree of the currently assigned finite element on cell $K$.

    During h-coarsening, the finite elements on siblings may be different, and their parent cell will be assigned the least dominating finite element that belongs to its most general child. Thus, we will always interpolate on an enclosing finite element space. Additionally assuming that the finite elements on the cells to be coarsened are sufficient to represent the solution correctly (e.g. at least quadratic basis functions for a quadratic solution), we are confident to say that the error will not change by sole interpolation on the larger finite element space.

    @@ -565,7 +565,7 @@ $\gamma_\text{p}^{(p_{K,\text{future}} - p_{K})}$

    On the basis of the refinement history, we use the predicted error estimates to decide how cells will be adapted in the next adaptation step. Comparing the predicted error from the previous adaptation step to the error estimates of the current step allows us to assess whether our previous choice of adaptation was justified, and lets us decide how to adapt in the next one.

    We thus have to transfer the predicted error from the old to the adapted mesh. When transferring the predicted error to the adapted mesh, make sure to configure your CellDataTransfer object with AdaptationStrategies::Refinement::l2_norm() as a refinement strategy and AdaptationStrategies::Coarsening::l2_norm() as a coarsening strategy. This ensures that the $l_2$-norm of the predicted errors is preserved on both meshes.

    In this context, we assume that the local error on a cell to be h-refined will be divided equally on all of its $n_{K_c}$ children, whereas local errors on siblings will be summed up on the parent cell in case of h-coarsening. This assumption is often not satisfied in practice: For example, if a cell is at a corner singularity, then the one child cell that ends up closest to the singularity will inherit the majority of the remaining error – but this function cannot know where the singularity will be, and consequently assumes equal distribution.
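
    A minimal sketch of the transfer just described; the CellDataTransfer template arguments and strategy syntax are abbreviated assumptions here (see the class documentation for the exact form), and the vector names are illustrative:

    // Predict errors on the current mesh, then carry them over to the
    // adapted mesh using the l2_norm strategies named above.
    Vector<float> predicted_errors(triangulation.n_active_cells());
    hp::Refinement::predict_error(dof_handler, error_indicators, predicted_errors);

    CellDataTransfer<dim, dim, Vector<float>> cell_data_transfer(
      triangulation,
      &AdaptationStrategies::Refinement::l2_norm<dim, dim, float>,
      &AdaptationStrategies::Coarsening::l2_norm<dim, dim, float>);

    cell_data_transfer.prepare_for_coarsening_and_refinement();
    triangulation.execute_coarsening_and_refinement();

    Vector<float> transferred_errors(triangulation.n_active_cells());
    cell_data_transfer.unpack(predicted_errors, transferred_errors);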

    Incorporating the transfer from the old to the adapted mesh, the complete error prediction algorithm reads as follows:

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html 2024-12-27 18:25:17.936936444 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html 2024-12-27 18:25:17.940936471 +0000 @@ -949,8 +949,8 @@
    const double coordinate_value

    Creates a (dim + 1)-dimensional point by copying over the coordinates of the incoming dim-dimensional point and setting the "missing" (dim + 1)-dimensional component to the incoming coordinate value.

    For example, given the input $\{(x, y), 2, z \}$ this function creates the point $(x, y, z)$.

    The coordinates of the dim-dimensional point are written to the coordinates of the (dim + 1)-dimensional point in the order of the convention given by the function coordinate_to_one_dim_higher. Thus, the order of coordinates on the lower-dimensional point is not preserved: $\{(z, x), 1, y \}$ creates the point $(x, y, z)$.

    Definition at line 23 of file function_restriction.cc.

    @@ -2594,7 +2594,7 @@
    Compute the polynomial interpolation of a tensor product shape function $\varphi_i$ given a vector of coefficients $u_i$ in the form $u_h(\mathbf{x}) = \sum_{i=1}^{k^d} \varphi_i(\mathbf{x}) u_i$. The shape functions $\varphi_i(\mathbf{x}) = \prod_{d=1}^{\text{dim}}\varphi_{i_d}^\text{1d}(x_d)$ represent a tensor product. The function returns a pair with the value of the interpolation as the first component and the gradient in reference coordinates as the second component. Note that for compound types (e.g. the values field being a Point<spacedim> argument), the components of the gradient are sorted as Tensor<1, dim, Tensor<1, spacedim>> with the derivatives as the first index; this is a consequence of the generic arguments in the function.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html 2024-12-27 18:25:17.976936719 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html 2024-12-27 18:25:17.984936774 +0000 @@ -396,7 +396,7 @@
    const unsigned int grainsize

    This function works a lot like the apply_to_subranges() function, but it allows one to accumulate numerical results computed on each subrange into one number. The type of this number is given by the ResultType template argument that needs to be explicitly specified, and results are added up (i.e., the reduction of results from subranges happens by adding up these results).

    An example of use of this function is to compute the value of the expression $x^T A x$ for a square matrix $A$ and a vector $x$. The sum over rows can be parallelized and the whole code might look like this:

    void matrix_norm (const FullMatrix &A,
                      const Vector &x)
    {
    return
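
    The snippet above is cut off in the diff; under the same idea, a complete minimal sketch might look like this (the lambda body and the grainsize of 50 are illustrative choices, not taken from the original example):

    double matrix_norm_sqr(const FullMatrix<double> &A, const Vector<double> &x)
    {
      return parallel::accumulate_from_subranges<double>(
        [&](const std::size_t begin, const std::size_t end) {
          // Accumulate x^T A x over the rows [begin, end) of this subrange.
          double partial_sum = 0;
          for (std::size_t i = begin; i < end; ++i)
            for (std::size_t j = 0; j < A.n(); ++j)
              partial_sum += x(i) * A(i, j) * x(j);
          return partial_sum;
        },
        std::size_t(0), A.m(), /*grainsize=*/50);
    }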
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html 2024-12-27 18:25:18.012936966 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html 2024-12-27 18:25:18.016936993 +0000 @@ -353,7 +353,7 @@
    if (cell->center()[1] > 0)
    cell->set_refine_flag ();

    This would refine all cells for which the $y$-coordinate of the cell's center is greater than zero (the TriaAccessor::center function that we call by dereferencing the cell iterator returns a Point<2> object; subscripting [0] would give the $x$-coordinate, subscripting [1] the $y$-coordinate). By looking at the functions that TriaAccessor provides, you can also use more complicated criteria for refinement.
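
    For reference, the two code lines quoted above sit inside a loop over all active cells; a minimal sketch of the complete idiom, followed by actually executing the refinement, would be:

    for (auto &cell : triangulation.active_cell_iterators())
      if (cell->center()[1] > 0)
        cell->set_refine_flag();
    triangulation.execute_coarsening_and_refinement();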

    In general, what you can do with operations of the form cell->something() is a bit difficult to find in the documentation because cell is not a pointer but an iterator. The functions you can call on a cell can be found in the documentation of the classes TriaAccessor (which has functions that can also be called on faces of cells or, more generally, all sorts of geometric objects that appear in a triangulation), and CellAccessor (which adds a few functions that are specific to cells).

    A more thorough description of the whole iterator concept can be found in the Iterators on mesh-like containers documentation topic.

    Different geometries

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html 2024-12-27 18:25:18.060937295 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html 2024-12-27 18:25:18.060937295 +0000 @@ -123,11 +123,11 @@
  Introduction

    This is a rather short example which only shows some aspects of using higher order mappings. By mapping we mean the transformation between the unit cell (i.e. the unit line, square, or cube) and the cells in real space. In all the previous examples, we have implicitly used linear or d-linear mappings; you will not have noticed this at all, since this is what happens if you do not do anything special. However, if your domain has curved boundaries, there are cases where the piecewise linear approximation of the boundary (i.e. by straight line segments) is not sufficient, and you want your computational domain to be an approximation to the real domain using curved boundaries as well. If the boundary approximation uses piecewise quadratic parabolas to approximate the true boundary, then we say that this is a quadratic or $Q_2$ approximation. If we use piecewise graphs of cubic polynomials, then this is a $Q_3$ approximation, and so on.

    For some differential equations, it is known that piecewise linear approximations of the boundary, i.e. $Q_1$ mappings, are not sufficient if the boundary of the exact domain is curved. Examples are the biharmonic equation using $C^1$ elements, or the Euler equations of gas dynamics on domains with curved reflective boundaries. In these cases, it is necessary to compute the integrals using a higher order mapping. If we do not use such a higher order mapping, the order of approximation of the boundary dominates the order of convergence of the entire numerical scheme, irrespective of the order of convergence of the discretization in the interior of the domain.

    Rather than demonstrating the use of higher order mappings with one of these more complicated examples, we do only a brief computation: calculating the value of $\pi=3.141592653589793238462643\ldots$ by two different methods.

    The first method uses a triangulated approximation of the circle with unit radius and integrates a unit magnitude constant function ( $f = 1$) over it. Of course, if the domain were the exact unit circle, then the area would be $\pi$, but since we only use an approximation by piecewise polynomial segments, the value of the area we integrate over is not exactly $\pi$. However, it is known that as we refine the triangulation, a $Q_p$ mapping approximates the boundary with an order $h^{p+1}$, where $h$ is the mesh size. We will check the values of the computed area of the circle and their convergence towards $\pi$ under mesh refinement for different mappings. We will also find a convergence behavior that is surprising at first, but has a good explanation.

    The second method works similarly, but this time does not use the area of the triangulated unit circle, but rather its perimeter. $\pi$ is then approximated by half of the perimeter, as we choose the radius equal to one.

    Note
    This tutorial shows in essence how to choose a particular mapping for integrals, by attaching a particular geometry to the triangulation (as had already been done in step-1, for example) and then passing a mapping argument to the FEValues class that is used for all integrals in deal.II. The geometry we choose is a circle, for which deal.II already has a class (SphericalManifold) that can be used. If you want to define your own geometry, for example because it is complicated and cannot be described by the classes already available in deal.II, you will want to read through step-53.

    The commented program

    The first of the following include files are probably well-known by now and need no further explanation.

    @@ -168,7 +168,7 @@
     

    Then alternate between generating output on the current mesh for $Q_1$, $Q_2$, and $Q_3$ mappings, and (at the end of the loop body) refining the mesh once globally.

      for (unsigned int refinement = 0; refinement < 2; ++refinement)
      {
      std::cout << "Refinement level: " << refinement << std::endl;
    @@ -205,9 +205,9 @@
      }
      }
     
    Now we proceed with the main part of the code, the approximation of $\pi$. The area of a circle is of course given by $\pi r^2$, so having a circle of radius 1, the area represents just the number that is searched for. The numerical computation of the area is performed by integrating the constant function of value 1 over the whole computational domain, i.e. by computing the areas $\int_K 1\, dx = \int_{\hat K} 1 \ \textrm{det}\ J(\hat x)\, d\hat x \approx \sum_i \textrm{det}\ J(\hat x_i)w(\hat x_i)$, where the sum extends over all quadrature points on all active cells in the triangulation, with $w(x_i)$ being the weight of quadrature point $x_i$. The integrals on each cell are approximated by numerical quadrature, hence the only additional ingredient we need is to set up a FEValues object that provides the corresponding JxW values of each cell. (Note that JxW is meant to abbreviate Jacobian determinant times weight; since in numerical quadrature the two factors always occur at the same places, we only offer the combined quantity, rather than two separate ones.) We note that here we won't use the FEValues object for its original purpose, i.e. for the computation of values of basis functions of a specific finite element at certain quadrature points. Rather, we use it only to gain the JxW at the quadrature points, irrespective of the (dummy) finite element we will give to the constructor of the FEValues object. The actual finite element given to the FEValues object is not used at all, so we could give any.
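
    A minimal sketch of the accumulation just described, assuming an FEValues object fe_values created with the update_JxW_values flag; the actual tutorial function begins below:

    double area = 0;
    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        fe_values.reinit(cell);
        // Sum the Jacobian-determinant-times-weight products; for the
        // constant function 1 this is exactly the cell's area contribution.
        for (unsigned int q = 0; q < fe_values.n_quadrature_points; ++q)
          area += fe_values.JxW(q);
      }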

      template <int dim>
      void compute_pi_by_area()
      {
    @@ -243,7 +243,7 @@
     

    We employ an object of the ConvergenceTable class to store all important data like the approximated values for $\pi$ and the error with respect to the true value of $\pi$. We will also use functions provided by the ConvergenceTable class to compute convergence rates of the approximations to $\pi$.

     
    @@ -291,7 +291,7 @@
      }
     
     
    The following, second function also computes an approximation of $\pi$ but this time via the perimeter $2\pi r$ of the domain instead of the area. This function is only a variation of the previous function. So we will mainly give documentation for the differences.

      template <int dim>
      void compute_pi_by_perimeter()
      {
    @@ -414,11 +414,11 @@
    unset ytics
    plot [-1:1][-1:1] "ball_0_mapping_q_1.dat" lw 4 lt rgb "black"

    or using one of the other filenames. The second line makes sure that the aspect ratio of the generated output is actually 1:1, i.e. a circle is drawn as a circle on your screen, rather than as an ellipse. The third line switches off the key in the graphic, as that will only print information (the filename) which is not that important right now. Similarly, the fourth and fifth disable tick marks. The plot is then generated with a specific line width ("lw", here set to 4) and line type ("lt", here chosen by saying that the line should be drawn using the RGB color "black").

    The following table shows the triangulated computational domain for $Q_1$, $Q_2$, and $Q_3$ mappings, for the original coarse grid (left), and a once uniformly refined grid (right).

    Five-cell discretization of the disk.
    20-cell discretization of the disk (i.e., five cells refined once).
    Five-cell discretization of the disk with quadratic edges. The boundary is nearly indistinguishable from the actual circle.
    20-cell discretization with quadratic edges.
    Five-cell discretization of the disk with cubic edges. The boundary is nearly indistinguishable from the actual circle.
    20-cell discretization with cubic edges.

    These pictures show the obvious advantage of higher order mappings: they approximate the true boundary quite well also on rather coarse meshes. To demonstrate this a little further, here is part of the upper right quarter circle of the coarse meshes with $Q_2$ and $Q_3$ mappings, where the dashed red line marks the actual circle:

    Close-up of quadratic discretization. The distance between the quadratic interpolant and the actual circle is small.
    Close-up of cubic discretization. The distance between the cubic interpolant and the actual circle is very small.

    Obviously the quadratic mapping approximates the boundary quite well, while for the cubic mapping the difference between approximated domain and true one is hardly visible already for the coarse grid. You can also see that the mapping only changes something at the outer boundaries of the triangulation. In the interior, all lines are still represented by linear functions, resulting in additional computations only on cells at the boundary. Higher order mappings are therefore usually not noticeably slower than lower order ones, because the additional computations are only performed on a small subset of all cells.

    @@ -510,14 +510,14 @@
    1280 3.1415926535897896 3.5527e-15 3.32
    5120 3.1415926535897940 8.8818e-16 2.00
    Note
    Once the error reaches a level on the order of $10^{-13}$ to $10^{-15}$, it is essentially dominated by round-off, and consequently by what precisely the library is doing in internal computations. Since these things change, the precise values and errors change from release to release at these round-off levels, though the overall order of errors should of course remain the same. See also the comment below in the section on Possibilities for extensions about how to compute these results more accurately.
    One of the immediate observations from the output above is that in all cases the values converge quickly to the true value of $\pi=3.141592653589793238462643$. Note that for the $Q_4$ mapping, we are already in the regime of round-off errors and the convergence rate levels off, at an accuracy that is already quite high. However, also note that for the $Q_1$ mapping, even on the finest grid the accuracy is significantly worse than on the coarse grid for a $Q_3$ mapping!

    The last column of the output shows the convergence order, in powers of the mesh width $h$. In the introduction, we had stated that the convergence order for a $Q_p$ mapping should be $h^{p+1}$. However, in the example shown, the order is rather $h^{2p}$! This at first surprising fact is explained by the properties of the $Q_p$ mapping. At order p, it uses support points that are based on the p+1 point Gauss-Lobatto quadrature rule that selects the support points in such a way that the quadrature rule converges at order 2p. Even though these points are here only used for interpolation of a pth order polynomial, we get a superconvergence effect when numerically evaluating the integral, resulting in the observed high order of convergence. (This effect is also discussed in detail in the following publication: A. Bonito, A. Demlow, and J. Owen: "A priori error estimates for finite element approximations to eigenvalues and eigenfunctions of the Laplace-Beltrami operator", submitted, 2018.)

    Possibilities for extensions

    As the table of numbers copied from the output of the program shows above, it is not very difficult to compute the value of $\pi$ to 13 or 15 digits. But, the output also shows that once we approach the level of accuracy with which double precision numbers store information (namely, with roughly 16 digits of accuracy), we no longer see the expected convergence order and the error no longer decreases with mesh refinement as anticipated. This is because both within this code and within the many computations that happen within deal.II itself, each operation incurs an error on the order of $10^{-16}$; adding such errors many times over then results in an error that may be on the order of $10^{-14}$, which will dominate the discretization error after a number of refinement steps and consequently destroy the convergence rate.

    The question is whether one can do anything about this. One thought is to use a higher-precision data type. For example, one could think of declaring both the area and perimeter variables in compute_pi_by_area() and compute_pi_by_perimeter() with data type long double. long double is a data type that is not well specified in the C++ standard but at least on Intel processors has around 19, instead of around 16, digits of accuracy. If we were to do that, we would get results that differ from the ones shown above. However, maybe counter-intuitively, they are not uniformly better. For example, when computing $\pi$ by the area, at the time of writing these sentences we get these values with double precision for degree 4:

    5 3.1415871927401144 5.4608e-06 -
    20 3.1415926314742491 2.2116e-08 7.95
    80 3.1415926535026268 8.7166e-11 7.99
    320 3.1415926535894005 3.9257e-13 7.79
    @@ -530,7 +530,7 @@
    320 3.1415926535894516 3.4157e-13 8.00
    1280 3.1415926535897918 1.5339e-15 7.80
    5120 3.1415926535897927 5.2649e-16 1.54
    Indeed, here we get results that are approximately 50 times as accurate. On the other hand, when computing $\pi$ by the perimeter, we get this with double precision:

    5 3.1415921029432572 5.5065e-07 -
    20 3.1415926513737582 2.2160e-09 7.96
    80 3.1415926535810699 8.7232e-12 7.99
    320 3.1415926535897576 3.5527e-14 7.94
    @@ -542,7 +542,7 @@
    320 3.1415926535897576 3.5705e-14 7.93
    1280 3.1415926535897918 1.3785e-15 4.70
    5120 3.1415926535897944 1.3798e-15 -0.00
    Here, using double precision is more accurate by about a factor of two. (Of course, in all cases, we have computed $\pi$ with more accuracy than any engineer would ever want to know.)

    What explains this unpredictability? In general, round-off errors can be thought of as random, and add up in ways that are not worth thinking too much about; we should therefore always treat any accuracy beyond, say, thirteen digits as suspect. Thus, it is probably not worth spending too much time on wondering why we get different winners and losers in the data type exchange from double and long double. The accuracy of the results is also largely not determined by the precision of the data type in which we accumulate each cell's (or face's) contributions, but the accuracy of what deal.II gives us via FEValues::JxW() and FEFaceValues::JxW(), which always uses double precision and which we cannot directly affect.

    But there are cases where one can do something about the precision, and it is worth at least mentioning the name of the most well-known algorithm in this area. Specifically, what we are doing when we add contributions into the area and perimeter values is that we are adding together positive numbers as we do here. In general, the round-off errors associated with each of these numbers are random, and if we add up contributions of substantially different sizes, then we will likely be dominated by the error in the largest contributions. One can avoid this by adding up numbers sorted by their size, and this may then result in marginally more accurate end results. The algorithm that implements this is typically called Kahan's summation algorithm. While one could play with it in the current context, it is likely not going to improve the accuracy in ways that will truly matter.
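
    For illustration, a minimal generic sketch of Kahan's compensated summation (not code from the tutorial):

    double kahan_sum(const std::vector<double> &values)
    {
      double sum          = 0.;
      double compensation = 0.; // running estimate of the accumulated round-off
      for (const double v : values)
        {
          const double y = v - compensation; // apply the previous correction
          const double t = sum + y;          // low-order digits of y may be lost here
          compensation   = (t - sum) - y;    // recover what was just lost
          sum            = t;
        }
      return sum;
    }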

    The plain program

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html 2024-12-27 18:25:18.104937598 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html 2024-12-27 18:25:18.108937625 +0000 @@ -134,7 +134,7 @@

    \[
      \int_\Omega f\; dx + \int_{\partial\Omega} g\; ds = 0.
    \]

    We will consider the special case that $\Omega$ is the circle of radius 1 around the origin, and $f=-2$, $g=1$. This choice satisfies the compatibility condition.

    The compatibility condition allows a solution of the above equation, but it nevertheless retains an ambiguity: since only derivatives of the solution appear in the equations, the solution is only determined up to a constant. For this reason, we have to pose another condition for the numerical solution, which fixes this constant.

    For this, there are various possibilities:

    @@ -336,7 +336,7 @@

      That's quite simple, right?

      Two remarks are in order, though: First, these functions are used in a lot of contexts. Maybe you want to create a Laplace or mass matrix for a vector-valued finite element; or you want to use the default Q1 mapping; or you want to assemble the matrix with a coefficient in the Laplace operator. For this reason, there are quite a large number of variants of these functions in the MatrixCreator and MatrixTools namespaces. Whenever you need a slightly different version of these functions than the ones called above, it is certainly worthwhile to take a look at the documentation and to check whether something fits your needs.

      The second remark concerns the quadrature formula we use: we want to integrate over bilinear shape functions, so we know that we have to use at least an order two Gauss quadrature formula. On the other hand, we want the quadrature rule to have at least the order of the boundary approximation. Since the order of a Gauss rule with $r$ points is $2r - 1$, and the order of the boundary approximation using polynomials of degree $p$ is $p+1$, we know that $2r \geq p$. Since r has to be an integer and (as mentioned above) has to be at least $2$, this makes up for the formula above computing gauss_degree.

      Since the generation of the body force contributions to the right hand side vector was so simple, we do that all over again for the boundary forces as well: allocate a vector of the right size and call the right function. The boundary function has constant values, so we can generate an object from the library on the fly, and we use the same quadrature formula as above, but this time of lower dimension since we integrate over faces now instead of cells:

        Vector<double> tmp(system_rhs.size());
      /usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html 2024-12-27 18:25:18.152937927 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html 2024-12-27 18:25:18.156937955 +0000 @@ -157,7 +157,7 @@

      \[
        u = g \quad \mbox{on } \Gamma_-,
      \]

      on the inflow part $\Gamma_-$ of the boundary $\Gamma=\partial\Omega$ of the domain. Here, ${\mathbf \beta}={\mathbf \beta}({\bf x})$ denotes a vector field, $u$ the (scalar) solution function, $g$ a boundary value function,

      \[
        \Gamma_- \dealcoloneq \{{\bf x}\in\Gamma, {\mathbf \beta}({\bf x})\cdot{\bf n}({\bf x})<0\}
      \]

      @@ -837,7 +837,7 @@

      There are a number of strategies to stabilize the cG method, if one wants to use continuous elements for some reason. Discussing these methods is beyond the scope of this tutorial program; an interested reader could, for example, take a look at step-31.

      Possibilities for extensions

      Given that the exact solution is known in this case, one interesting avenue for further extensions would be to confirm the order of convergence for this program. In the current case, the solution is non-smooth, and so we cannot expect to get a particularly high order of convergence, even if we used higher order elements. But even if the solution is smooth, the equation is not elliptic and so it is not immediately clear that we should obtain a convergence order that equals that of the optimal interpolation estimates (i.e. for example that we would get $h^3$ convergence in the $L^2$ norm by using quadratic elements).

      In fact, for hyperbolic equations, theoretical predictions often indicate that the best one can hope for is an order one half below the interpolation estimate. For example, for the streamline diffusion method (an alternative method to the DG method used here to stabilize the solution of the transport equation), one can prove that for elements of degree $p$, the order of convergence is $p+\frac 12$ on arbitrary meshes. While the observed order is frequently $p+1$ on uniformly refined meshes, one can construct so-called Peterson meshes on which the worse theoretical bound is actually attained. This should be relatively simple to verify, for example using the VectorTools::integrate_difference function.

      A different direction is to observe that the solution of transport problems often has discontinuities and that therefore a mesh in which we bisect every cell in every coordinate direction may not be optimal. Rather, a better strategy would be to only cut cells in the direction parallel to the discontinuity. This is called anisotropic mesh refinement and is the subject of step-30.

      The plain program

      /* ------------------------------------------------------------------------
      /usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html 2024-12-27 18:25:18.296938916 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html 2024-12-27 18:25:18.300938943 +0000 @@ -176,30 +176,30 @@

      The Heidelberg group of Professor Rolf Rannacher, to which the three initial authors of the deal.II library belonged during their PhD time and partly also afterwards, has been involved with adaptivity and error estimation for finite element discretizations since the mid-1990s. The main achievement is the development of error estimates for arbitrary functionals of the solution, and of optimal mesh refinement for its computation.

      We will not discuss the derivation of these concepts in too great detail, but will implement the main ideas in the present example program. For a thorough introduction into the general idea, we refer to the seminal work of Becker and Rannacher [BR95], [BR96r], and the overview article of the same authors in Acta Numerica [BR01]; the first introduces the concept of error estimation and adaptivity for general functional output for the Laplace equation, while the second gives many examples of applications of these concepts to a large number of other, more complicated equations. For applications to individual types of equations, see also the publications by Becker [Bec95], [Bec98], Kanschat [Kan96], [FK97], Suttmeier [Sut96], [RS97], [RS98c], [RS99], Bangerth [BR99b], [Ban00w], [BR01a], [Ban02], and Hartmann [Har02], [HH01], [HH01b]. All of these works, from the original introduction by Becker and Rannacher to individual contributions to particular equations, have later been summarized in a book by Bangerth and Rannacher that covers all of these topics, see [BR03].

      The basic idea is the following: in applications, one is not usually interested in the solution per se, but rather in certain aspects of it. For example, in simulations of flow problems, one may want to know the lift or drag of a body immersed in the fluid; it is this quantity that we want to know to best accuracy, and whether the rest of the solution of the describing equations is well resolved is not of primary interest. Likewise, in elasticity one might want to know about values of the stress at certain points to guess whether maximal load values of joints are safe, for example. Or, in radiative transfer problems, mean flux intensities are of interest.

      In all the cases just listed, it is the evaluation of a functional $J(u)$ of the solution which we are interested in, rather than the values of $u$ everywhere. Since the exact solution $u$ is not available, but only its numerical approximation $u_h$, it is sensible to ask whether the computed value $J(u_h)$ is within certain limits of the exact value $J(u)$, i.e. we want to bound the error with respect to this functional, $J(u)-J(u_h)$.

      For simplicity of exposition, we henceforth assume that both the quantity of interest $J$ as well as the equation are linear, and we will in particular show the derivation for the Laplace equation with homogeneous Dirichlet boundary conditions, although the concept is much more general. For this general case, we refer to the references listed above. The goal is to obtain bounds on the error, $J(e)=J(u)-J(u_h)$. For this, let us denote by $z$ the solution of a dual problem, defined as follows:

      \[
        a(\varphi,z) = J(\varphi) \qquad \forall \varphi,
      \]

      where $a(\cdot,\cdot)$ is the bilinear form associated with the differential equation, and the test functions are chosen from the corresponding solution space. Then, taking as special test function $\varphi=e$ the error, we have that

      \[
        J(e) = a(e,z)
      \]

      and we can, by Galerkin orthogonality, rewrite this as

      \[
        J(e) = a(e,z-\varphi_h)
      \]

      where $\varphi_h$ can be chosen from the discrete test space in whatever way we find convenient.

      Concretely, for Laplace's equation, the error identity reads

      \[
        J(e) = (\nabla e, \nabla(z-\varphi_h)).
      \]

      Because we want to use this formula not only to compute error, but also to refine the mesh, we need to rewrite the expression above as a sum over cells where each cell's contribution can then be used as an error indicator for this cell. Thus, we split the scalar products into terms for each cell, and integrate by parts on each of them:

      \begin{eqnarray*}
        J(e)
        &=&
        \sum_K (\nabla (u-u_h), \nabla (z-\varphi_h))_K
        \\
        &=&
        \sum_K (-\Delta (u-u_h), z-\varphi_h)_K
        + (\partial_n (u-u_h), z-\varphi_h)_{\partial K}.
      \end{eqnarray*}

      Next we use that $-\Delta u=f$, and that the solution of the Laplace equation is smooth enough that $\partial_n u$ is continuous almost everywhere – so the terms involving $\partial_n u$ on one cell cancel with those on its neighbor, where the normal vector has the opposite sign. (The same is not true for $\partial_n u_h$, though.) At the boundary of the domain, where there is no neighbor cell with which this term could cancel, the weight $z-\varphi_h$ can be chosen as zero, and the whole term disappears.

      Thus, we have

      \begin{eqnarray*}
        J(e)
        &=&
        \sum_K (f+\Delta u_h, z-\varphi_h)_K
        - (\partial_n u_h, z-\varphi_h)_{\partial K\backslash \partial\Omega}.
      \end{eqnarray*}

      In a final step, note that when taking the normal derivative of $u_h$, we mean the value of this quantity as taken from this side of the cell (for the usual Lagrange elements, derivatives are not continuous across edges). We then rewrite the above formula by exchanging half of the edge integral of cell $K$ with the neighbor cell $K'$, to obtain

      \begin{eqnarray*}
        J(e)
        &=&
        \sum_K (f+\Delta u_h, z-\varphi_h)_K
        - \frac 12 (\partial_n u_h|_K + \partial_{n'} u_h|_{K'},
                    z-\varphi_h)_{\partial K\backslash \partial\Omega}.
      \end{eqnarray*}

      Using that for the normal vectors on adjacent cells we have $n'=-n$, we define the jump of the normal derivative by

      \[
        [\partial_n u_h] \dealcoloneq \partial_n u_h|_K + \partial_{n'} u_h|_{K'}
        =
        \partial_n u_h|_K - \partial_n u_h|_{K'},
      \]

      and get the final form after setting the discrete function $\varphi_h$, which is by now still arbitrary, to the point interpolation of the dual solution, $\varphi_h=I_h z$:

      \begin{eqnarray*}
        J(e)
        &=&
        \sum_K (f+\Delta u_h, z-I_h z)_K
        - \frac 12 ([\partial_n u_h],
                    z-I_h z)_{\partial K\backslash \partial\Omega}.
      \end{eqnarray*}

      With this, we have obtained an exact representation of the error of the finite element discretization with respect to arbitrary (linear) functionals $J(\cdot)$. Its structure is a weighted form of a residual estimator, as both $f+\Delta u_h$ and $[\partial_n u_h]$ are cell and edge residuals that vanish on the exact solution, and $z-I_h z$ are weights indicating how important the residual on a certain cell is for the evaluation of the given functional. Furthermore, it is a cell-wise quantity, so we can use it as a mesh refinement criterion. The question is: how to evaluate it? After all, the evaluation requires knowledge of the dual solution $z$, which carries the information about the quantity we want to know to best accuracy.

      In some, very special cases, this dual solution is known. For example, if the functional $J(\cdot)$ is the point evaluation, $J(\varphi)=\varphi(x_0)$, then the dual solution has to satisfy

      \[
        -\Delta z = \delta(x-x_0),
      \]

      with the Dirac delta function on the right hand side, and the dual solution is the Green's function with respect to the point $x_0$. For simple geometries, this function is analytically known, and we could insert it into the error representation formula.
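As a concrete illustration (standard potential theory, not part of the original text): in 2d, the fundamental solution of $-\Delta$ gives

\[
   z(x) = -\frac{1}{2\pi} \ln |x-x_0| + w(x),
\]

where the correction $w$ is harmonic in $\Omega$ and chosen such that $z$ satisfies the homogeneous boundary condition $z=0$ on $\partial\Omega$; on simple geometries such as the unit disk, $w$ can be written down explicitly by the method of images.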

However, we do not want to restrict ourselves to such special cases. Rather, we will compute the dual solution numerically, and approximate $z$ by some numerically obtained $\tilde z$. We note that it is not sufficient to compute this approximation $\tilde z$ using the same method as used for the primal solution $u_h$, since then $\tilde z-I_h \tilde z=0$, and the overall error estimate would be zero. Rather, the approximation $\tilde z$ has to be from a larger space than the primal finite element space. There are various ways to obtain such an approximation (see the cited literature), and we will choose to compute it with a higher order finite element space. While this is certainly not the most efficient way, it is simple since we already have all we need to do that in place, and it also allows for simple experimenting. For more efficient methods, again refer to the given literature, in particular [BR95], [BR03].
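In deal.II terms, "a larger space" can be realized by simply raising the polynomial degree. A minimal sketch, with degrees chosen for illustration:

#include <deal.II/fe/fe_q.h>

// Primal problem: (bi-)linear elements for u_h.
const FE_Q<dim> primal_fe(1);
// Dual problem: one degree higher, so that the weights
// \tilde z - I_h \tilde z do not vanish identically.
const FE_Q<dim> dual_fe(2);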

      With this, we end the discussion of the mathematical side of this program and turn to the actual implementation.

Note
There are two steps above that do not seem necessary if all you care about is computing the error: namely, (i) the subtraction of $\varphi_h$ from $z$, and (ii) splitting the integral into a sum of cells and integrating by parts on each. Indeed, neither of these two steps changes $J(e)$ at all, as we only ever consider identities above until the substitution of $z$ by $\tilde z$. In other words, if you care only about estimating the global error $J(e)$, then these steps are not necessary. On the other hand, if you want to use the error estimate also as a refinement criterion for each cell of the mesh, then it is necessary to (i) break the estimate into a sum of cells, and (ii) massage the formulas in such a way that each cell's contributions have something to do with the local error. (While the contortions above do not change the value of the sum $J(e)$, they change the values we compute for each cell $K$.) To this end, we want to write everything in the form "residual times dual weight" where a "residual" is something that goes to zero as the approximation $u_h$ becomes better and better. For example, the quantity $\partial_n u_h$ is not a residual, since it simply converges to the (normal component of) the gradient of the exact solution. On the other hand, $[\partial_n u_h]$ is a residual because it converges to $[\partial_n u]=0$. All of the steps we have taken above in developing the final form of $J(e)$ have indeed had the goal of bringing the final formula into a form where each term converges to zero as the discrete solution $u_h$ converges to $u$. This then allows considering each cell's contribution as an "error indicator" that also converges to zero – as it should as the mesh is refined.

      The software

      The step-14 example program builds heavily on the techniques already used in the step-13 program. Its implementation of the dual weighted residual error estimator explained above is done by deriving a second class, properly called DualSolver, from the Solver base class, and having a class (WeightedResidual) that joins the two again and controls the solution of the primal and dual problem, and then uses both to compute the error indicator for mesh refinement.

The program continues the modular concept of the previous example by describing the dual functional, i.e. the quantity of interest, through an abstract base class, and by providing two different functionals that implement this interface. Adding a different quantity of interest is thus simple.
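A hedged sketch of what such an abstract interface might look like; the class and member names mirror the description above but are illustrative, not verbatim code from the program:

#include <deal.II/base/point.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

// All a dual solver needs from a quantity of interest J(.) is the
// right hand side J(varphi_i) of the dual problem.
template <int dim>
class DualFunctionalBase
{
public:
  virtual ~DualFunctionalBase() = default;

  virtual void assemble_rhs(const DoFHandler<dim> &dof_handler,
                            Vector<double>        &rhs) const = 0;
};

// One concrete functional: point evaluation J(u) = u(x_0).
template <int dim>
class PointValueEvaluation : public DualFunctionalBase<dim>
{
public:
  explicit PointValueEvaluation(const Point<dim> &evaluation_point)
    : evaluation_point(evaluation_point)
  {}

  virtual void assemble_rhs(const DoFHandler<dim> &dof_handler,
                            Vector<double>        &rhs) const override;

private:
  const Point<dim> evaluation_point;
};

Adding a new quantity of interest then amounts to deriving one more class from this base.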


Note the subtle interplay between resolving the corner singularities and resolving around the point of evaluation. It will be rather difficult to generate such a mesh by hand, as this would involve judging quantitatively how strongly each of the four corner singularities should be resolved, and weighing that against the resolution in the vicinity of the evaluation point.

The program prints the point value and the estimated error in this quantity. From extrapolating it, we can guess that the exact value is somewhere close to 0.0334473, plus or minus 0.0000001 (note that we get almost 6 valid digits from only 22,000 (primal) degrees of freedom). This number cannot be obtained from the value of the functional alone, but I have used the assumption that the error estimator is mostly exact, and extrapolated the computed value plus the estimated error, to get an approximation of the true value. Computing with more degrees of freedom shows that this assumption is indeed valid.

From the computed results, we can generate two graphs: one that shows the convergence of the error $J(u)-J(u_h)$ (taking the extrapolated value as correct) in the point value, and the value that we get by adding up computed value $J(u_h)$ and estimated error $\eta$ (if the error estimator $\eta$ were exact, then the value $J(u_h)+\eta$ would equal the exact point value, and the error in this quantity would always be zero; however, since the error estimator is only a - good - approximation to the true error, we can by this only reduce the size of the error). In this graph, we also indicate the complexity ${\cal O}(1/N)$ to show that mesh refinement acts optimally in this case. The second chart compares true and estimated error, and shows that the two are actually very close to each other, even for such a complicated quantity as the point value:

    Comparing refinement criteria

Since we have accepted quite some effort when using the mesh refinement driven by the dual weighted error estimator (for solving the dual problem, and for evaluating the error representation), it is worthwhile asking whether that effort was successful. To this end, we first compare the achieved error levels for different mesh refinement criteria. To generate this data, simply change the value of the mesh refinement criterion variable in the main program. The results are thus (for the weight in the Kelly indicator, we have chosen the function $1/(r^2+0.1^2)$, where $r$ is the distance to the evaluation point; it can be shown that this is the optimal weight if we neglect the effects of boundaries):

Checking these numbers, we see that for global refinement, the error is proportional to $O(1/(\sqrt{N}\log(N)))$, and for the dual estimator $O(1/N)$. Generally speaking, we see that the dual weighted error estimator is better than the other refinement indicators, at least when compared with those that have a similarly regular behavior. The Kelly indicator produces smaller errors, but jumps about the picture rather irregularly, with the error also changing signs sometimes. Therefore, its behavior does not allow extrapolating the results to larger values of $N$. Furthermore, if we trust the error estimates of the dual weighted error estimator, the results can be improved by adding the estimated error to the computed values. In terms of reliability, the weighted estimator is thus better than the Kelly indicator, although the latter sometimes produces smaller errors.

    Evaluation of point stresses

    Besides evaluating the values of the solution at a certain point, the program also offers the possibility to evaluate the x-derivatives at a certain point, and also to tailor mesh refinement for this. To let the program compute these quantities, simply replace the two occurrences of PointValueEvaluation in the main function by PointXDerivativeEvaluation, and let the program run:

    Refinement cycle: 0
    Number of degrees of freedom: 72

Note the asymmetry of the grids compared with those we obtained for the point evaluation. This is due to the fact that the domain and the primal solution may be symmetric about the diagonal, but the $x$-derivative is not, and the latter enters the refinement criterion.

Then, it is interesting to compare actually computed values of the quantity of interest (i.e. the $x$-derivative of the solution at one point) with a reference value of -0.0528223... plus or minus 0.0000005. We get this reference value by computing on a finer grid after some more mesh refinements, with approximately 130,000 cells. Recall that if the error is $O(1/N)$ in the optimal case, then taking a mesh with ten times more cells gives us one additional digit in the result.

    In the left part of the following chart, you again see the convergence of the error towards this extrapolated value, while on the right you see a comparison of true and estimated error:

After an initial phase where the true error changes its sign, the estimated error matches it quite well, again. Also note the dramatic improvement in the error when using the estimated error to correct the computed value of $J(u_h)$.

    step-13 revisited

If instead of the Exercise_2_3 data set, we choose CurvedRidges in the main function, and choose $(0.5,0.5)$ as the evaluation point, then we can redo the computations of the previous example program, to compare whether the results obtained with the help of the dual weighted error estimator are better than those we had previously.

    First, the meshes after 9 adaptive refinement cycles obtained with the point evaluation and derivative evaluation refinement criteria, respectively, look like this:

/usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    Introduction

    Foreword

This program deals with an example of a non-linear elliptic partial differential equation, the minimal surface equation. You can imagine the solution of this equation to describe the surface spanned by a soap film that is enclosed by a closed wire loop. We imagine the wire to not just be a planar loop, but in fact curved. The surface tension of the soap film will then reduce the surface to one of minimal surface area. The solution of the minimal surface equation describes this shape with the wire's vertical displacement as a boundary condition. For simplicity, we will here assume that the surface can be written as a graph $u=u(x,y)$ although it is clear that it is not very hard to construct cases where the wire is bent in such a way that the surface can only locally be constructed as a graph but not globally.

    Because the equation is non-linear, we can't solve it directly. Rather, we have to use Newton's method to compute the solution iteratively.

    Note
    The material presented here is also discussed in video lecture 31.5, video lecture 31.55, video lecture 31.6. (All video lectures are also available here.) (See also video lecture 31.65, video lecture 31.7.)

    Classical formulation

    In a classical sense, the problem is given in the following form:

\begin{align*}
    -\nabla \cdot \left( \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right) &= 0 \qquad
    \qquad &&\textrm{in} ~ \Omega
    \\
    u&=g \qquad\qquad &&\textrm{on} ~ \partial \Omega.
  \end{align*}

$\Omega$ is the domain we get by projecting the wire's positions into $x$-$y$ space. In this example, we choose $\Omega$ as the unit disk.

As described above, we solve this equation using Newton's method in which we compute the $n$th approximate solution from the $(n-1)$th one, and use a damping parameter $\alpha^n$ to get better global convergence behavior:

\begin{align*}
    F'(u^{n},\delta u^{n})&=- F(u^{n})
    \\
    u^{n+1}&=u^{n}+\alpha^n \delta u^{n}
  \end{align*}

    with

\[
    F(u) \dealcoloneq -\nabla \cdot \left( \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right)
  \]

and $F'(u,\delta u)$ the derivative of $F$ in direction of $\delta u$:

\[
   F'(u,\delta u)=\lim \limits_{\epsilon \rightarrow 0}{\frac{F(u+\epsilon \delta u)-
   F(u)}{\epsilon}}.
\]

Going through the motions to find out what $F'(u,\delta u)$ is, we find that we have to solve a linear elliptic PDE in every Newton step, with $\delta u^n$ as the solution of:

\[
   - \nabla \cdot \left( \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}}\nabla
   \delta u^{n} \right) +
   \nabla \cdot \left( \frac{\nabla u^{n} \cdot
   \nabla \delta u^{n}}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{3}{2}}} \nabla u^{n}
   \right)  =
   -\left( - \nabla \cdot \left( \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}}
   \nabla u^{n} \right) \right)
\]

In order to solve the minimal surface equation, we have to solve this equation repeatedly, once per Newton step. To solve this, we have to take a look at the boundary condition of this problem. Assuming that $u^{n}$ already has the right boundary values, the Newton update $\delta u^{n}$ should have zero boundary conditions, in order to have the right boundary condition after adding both. In the first Newton step, we start with the solution $u^{0}\equiv 0$, and the Newton update still has to deliver the right boundary condition to the solution $u^{1}$.

Summing up, we have to solve the PDE above with the boundary condition $\delta u^{0}=g$ in the first step and with $\delta u^{n}=0$ in all the following steps.

Note
In some sense, one may argue that if the program already implements $F(u)$, it is duplicative to also have to implement $F'(u,\delta u)$. As always, duplication tempts bugs and we would like to avoid it. While we do not explore this issue in this program, we will come back to it at the end of the Possibilities for extensions section below, and specifically in step-72.
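A minimal sketch of how the two boundary-condition cases described above might be imposed, assuming a hypothetical BoundaryValues<dim> class implementing $g$ and the usual linear system objects; the program may organize this differently:

#include <deal.II/base/function.h>
#include <deal.II/numerics/matrix_tools.h>
#include <deal.II/numerics/vector_tools.h>

std::map<types::global_dof_index, double> boundary_values;
if (first_step)
  // delta u^0 = g: the first update carries the boundary data.
  VectorTools::interpolate_boundary_values(dof_handler,
                                           /*boundary_id=*/0,
                                           BoundaryValues<dim>(),
                                           boundary_values);
else
  // delta u^n = 0 in all following steps.
  VectorTools::interpolate_boundary_values(dof_handler,
                                           /*boundary_id=*/0,
                                           Functions::ZeroFunction<dim>(),
                                           boundary_values);
MatrixTools::apply_boundary_values(boundary_values,
                                   system_matrix,
                                   newton_update,
                                   system_rhs);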

    Weak formulation of the problem

Starting with the strong formulation above, we get the weak formulation by multiplying both sides of the PDE with a test function $\varphi$ and integrating by parts on both sides:

\[
   \left( \nabla \varphi , \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}}\nabla
   \delta u^{n} \right)-\left(\nabla \varphi ,\frac{\nabla u^{n} \cdot \nabla
   \delta u^{n}}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{3}{2}}}\nabla u^{n}  \right)
   = -\left(\nabla \varphi , \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}} \nabla u^{n}
    \right).
\]

Here the solution $\delta u^{n}$ is a function in $H^{1}(\Omega)$, subject to the boundary conditions discussed above. Reducing this space to a finite dimensional space with basis $\left\{\varphi_{0},\dots , \varphi_{N-1}\right\}$, we can write the solution:

\[
   \delta u^{n}=\sum_{j=0}^{N-1} \delta U_{j} \varphi_{j}.
\]

Using the basis functions as test functions and defining $a_{n} \dealcoloneq \frac{1}{\sqrt{1+|\nabla u^{n}|^{2}}}$, we can rewrite the weak formulation:

\[
   \sum_{j=0}^{N-1}\left[ \left( \nabla \varphi_{i} , a_{n} \nabla \varphi_{j} \right) -
   \left(\nabla u^{n}\cdot \nabla \varphi_{i} , a_{n}^{3} \nabla u^{n} \cdot \nabla
   \varphi_{j} \right) \right] \cdot \delta U_{j}=-\left( \nabla \varphi_{i} , a_{n}
   \nabla u^{n}\right) \qquad \forall i=0,\dots ,N-1,
\]

where the solution $\delta u^{n}$ is given by the coefficients $\delta U^{n}_{j}$. This linear system of equations can be rewritten as:

\[
   A^{n}\; \delta U^{n}=b^{n},
\]

where the entries of the matrix $A^{n}$ are given by:

\[
   A^{n}_{ij} \dealcoloneq \left( \nabla \varphi_{i} , a_{n} \nabla \varphi_{j} \right) -
   \left(\nabla u^{n}\cdot \nabla \varphi_{i} , a_{n}^{3} \nabla u^{n} \cdot \nabla
   \varphi_{j} \right),
\]

and the right hand side $b^{n}$ is given by:

\[
   b^{n}_{i} \dealcoloneq -\left( \nabla \varphi_{i} , a_{n} \nabla u^{n}\right).
\]
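For concreteness, here is a hedged sketch of how $A^{n}$ and $b^{n}$ could be assembled with FEValues; the variable names (fe, dof_handler, current_solution, system_matrix, system_rhs) are assumptions in the usual tutorial style, and constraint handling is omitted:

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/fe/fe_values.h>
#include <cmath>

const QGauss<dim> quadrature(fe.degree + 1);
FEValues<dim>     fe_values(fe, quadrature,
                            update_gradients | update_JxW_values);

const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
Vector<double>     cell_rhs(dofs_per_cell);
std::vector<Tensor<1, dim>>          old_gradients(quadrature.size());
std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

for (const auto &cell : dof_handler.active_cell_iterators())
  {
    cell_matrix = 0;
    cell_rhs    = 0;
    fe_values.reinit(cell);
    // grad u^n at the quadrature points of this cell:
    fe_values.get_function_gradients(current_solution, old_gradients);

    for (unsigned int q = 0; q < quadrature.size(); ++q)
      {
        // a_n = 1 / sqrt(1 + |grad u^n|^2)
        const double a_n =
          1.0 / std::sqrt(1.0 + old_gradients[q] * old_gradients[q]);

        for (unsigned int i = 0; i < dofs_per_cell; ++i)
          {
            for (unsigned int j = 0; j < dofs_per_cell; ++j)
              cell_matrix(i, j) +=
                (a_n * (fe_values.shape_grad(i, q) *
                        fe_values.shape_grad(j, q)) -
                 a_n * a_n * a_n *
                   (old_gradients[q] * fe_values.shape_grad(i, q)) *
                   (old_gradients[q] * fe_values.shape_grad(j, q))) *
                fe_values.JxW(q);

            // b^n_i = -(grad phi_i, a_n grad u^n)
            cell_rhs(i) -= a_n *
                           (fe_values.shape_grad(i, q) * old_gradients[q]) *
                           fe_values.JxW(q);
          }
      }

    cell->get_dof_indices(local_dof_indices);
    for (unsigned int i = 0; i < dofs_per_cell; ++i)
      {
        for (unsigned int j = 0; j < dofs_per_cell; ++j)
          system_matrix.add(local_dof_indices[i], local_dof_indices[j],
                            cell_matrix(i, j));
        system_rhs(local_dof_indices[i]) += cell_rhs(i);
      }
  }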

    Questions about the appropriate solver

    The matrix that corresponds to the Newton step above can be reformulated to show its structure a bit better. Rewriting it slightly, we get that it has the form

\[
   A_{ij}
   =
   \left(
     \nabla \varphi_i,
     B
     \nabla \varphi_j
   \right),
\]

where the matrix $B$ (of size $d \times d$ in $d$ space dimensions) is given by the following expression:

\[
   B
   =
   a_n \left\{
   \mathbf 1 -
   \frac{\nabla u_n}{\sqrt{1+|\nabla u^{n}|^{2}}} \otimes
   \frac{\nabla u_n}{\sqrt{1+|\nabla u^{n}|^{2}}}
   \right\}.
\]

From this expression, it is obvious that $B$ is symmetric, and so $A$ is symmetric as well. On the other hand, $B$ is also positive definite, which confers the same property onto $A$. This can be seen by noting that the vector $v_1 = \frac{\nabla u^n}{|\nabla u^n|}$ is an eigenvector of $B$ with eigenvalue $\lambda_1=a_n \left(1-\frac{|\nabla u^n|^2}{1+|\nabla u^n|^2}\right) > 0$ while all vectors $v_2\ldots v_d$ that are perpendicular to $v_1$ and each other are eigenvectors with eigenvalue $a_n$. Since all eigenvalues are positive, $B$ is positive definite and so is $A$. We can thus use the CG method for solving the Newton steps. (The fact that the matrix $A$ is symmetric and positive definite should not come as a surprise. It results from taking the derivative of an operator that results from taking the derivative of an energy functional: the minimal surface equation simply minimizes some non-quadratic energy. Consequently, the Newton matrix, as the matrix of second derivatives of a scalar energy, must be symmetric since the derivative with regard to the $i$th and $j$th degree of freedom should clearly commute. Likewise, if the energy functional is convex, then the matrix of second derivatives must be positive definite, and the direct calculation above simply reaffirms this.)

It is worth noting, however, that the positive definiteness degenerates for problems where $\nabla u$ becomes large. In other words, if we simply multiply all boundary values by 2, then to first order $u$ and $\nabla u$ will also be multiplied by two, but as a consequence the smallest eigenvalue of $B$ will become smaller and the matrix will become more ill-conditioned. (More specifically, for $|\nabla u^n|\rightarrow\infty$ we have that $\lambda_1 \propto a_n \frac{1}{|\nabla u^n|^2}$ whereas $\lambda_2\ldots \lambda_d=a_n$; thus, the condition number of $B$, which is a multiplicative factor in the condition number of $A$, grows like ${\cal O}(|\nabla u^n|^2)$.) It is simple to verify with the current program that indeed multiplying the boundary values used in the current program by larger and larger values results in a problem that will ultimately no longer be solvable using the simple preconditioned CG method we use here.
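A quick check of the eigenvalue claim above (a direct computation from the formula for $B$, spelled out here for convenience): since $\frac{\nabla u^n}{\sqrt{1+|\nabla u^n|^2}} \cdot v_1 = \frac{|\nabla u^n|}{\sqrt{1+|\nabla u^n|^2}}$, we get

\begin{align*}
   B v_1
   = a_n \left( v_1 - \frac{|\nabla u^n|}{\sqrt{1+|\nabla u^n|^2}}
     \frac{\nabla u^n}{\sqrt{1+|\nabla u^n|^2}} \right)
   = a_n \left( 1 - \frac{|\nabla u^n|^2}{1+|\nabla u^n|^2} \right) v_1,
\end{align*}

while any $v$ perpendicular to $\nabla u^n$ is left untouched by the second term, so $B v = a_n v$.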

    Choice of step length and globalization

As stated above, Newton's method works by computing a direction $\delta u^n$ and then performing the update $u^{n+1} = u^{n}+\alpha^n \delta u^{n}$ with a step length $0 < \alpha^n \le 1$. It is a common observation that for strongly nonlinear models, Newton's method does not converge if we always choose $\alpha^n=1$ unless one starts with an initial guess $u^0$ that is sufficiently close to the solution $u$ of the nonlinear problem. In practice, we don't always have such an initial guess, and consequently taking full Newton steps (i.e., using $\alpha=1$) frequently does not work.

A common strategy therefore is to use a smaller step length for the first few steps while the iterate $u^n$ is still far away from the solution $u$ and as we get closer use larger values for $\alpha^n$ until we can finally start to use full steps $\alpha^n=1$ as we are close enough to the solution. The question is of course how to choose $\alpha^n$. There are basically two widely used approaches: line search and trust region methods.

In this program, we simply always choose the step length equal to 0.1. This makes sure that for the testcase at hand we do get convergence, although it is clear that by not eventually reverting to full step lengths we forego the rapid, quadratic convergence that makes Newton's method so appealing. Obviously, this is a point one would eventually have to address if the program were made into one that is meant to solve more realistic problems. We will comment on this issue some more in the results section, and use an even better approach in step-77.
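In code, this fixed choice amounts to nothing more than the following sketch of the determine_step_length function named in the algorithm summary below (whether the real function is const or takes arguments is a detail we do not reproduce here):

template <int dim>
double MinimalSurfaceProblem<dim>::determine_step_length() const
{
  // Fixed damping: robust for this testcase, but forfeits Newton's
  // quadratic convergence near the solution.
  return 0.1;
}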

    Summary of the algorithm and testcase

    Overall, the program we have here is not unlike step-6 in many regards. The layout of the main class is essentially the same. On the other hand, the driving algorithm in the run() function is different and works as follows:

1. Start with the function $u^{0}\equiv 0$ and modify it in such a way that the values of $u^0$ along the boundary equal the correct boundary values $g$ (this happens in the call to AffineConstraints::distribute()). Set $n=0$.

2. Compute the Newton update by solving the system $A^{n}\;\delta U^{n}=b^{n}$ with boundary condition $\delta u^{n}=0$ on $\partial \Omega$.

3. Compute a step length $\alpha^n$. In this program, we always set $\alpha^n=0.1$. To make things easier to extend later on, this happens in a function of its own, namely in MinimalSurfaceProblem::determine_step_length. (The strategy of always choosing $\alpha^n=0.1$ is of course not optimal – we should choose a step length that works for a given search direction – but it requires a bit of work to do that. In the end, we leave these sorts of things to external packages: step-77 does that.)
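Taken together, the three steps above might be driven by a loop like the following hedged sketch; the member functions are hypothetical names for the steps just described, and the actual program interleaves mesh refinement and its stopping criterion differently:

#include <iostream>
#include <limits>

template <int dim>
void MinimalSurfaceProblem<dim>::newton_iteration()
{
  setup_system();
  set_boundary_values(); // step 1: u^0 = 0 inside, g on the boundary

  double residual_norm = std::numeric_limits<double>::max();
  while (residual_norm > 1e-3)
    {
      assemble_system(); // build A^n and b^n from the current u^n
      solve();           // step 2: A^n delta U^n = b^n, zero boundary values

      const double alpha = determine_step_length(); // step 3: always 0.1 here
      current_solution.add(alpha, newton_update);   // u^{n+1} = u^n + alpha delta u^n

      residual_norm = compute_residual_norm();
      std::cout << "  residual: " << residual_norm << std::endl;
    }
}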

/usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html differs (HTML document, UTF-8 Unicode text, with very long lines)
The fine level in this mesh consists only of the degrees of freedom that are defined on the refined cells, but does not extend to that part of the domain that is not refined. While this guarantees that the overall effort grows as ${\cal O}(N)$ as necessary for optimal multigrid complexity, it leads to problems when defining where to smooth and what boundary conditions to pose for the operators defined on individual levels if the level boundary is not an external boundary. These questions are discussed in detail in the article cited above.

      The testcase

      The problem we solve here is similar to step-6, with two main differences: first, the multigrid preconditioner, obviously. We also change the discontinuity of the coefficients such that the local assembler does not look more complicated than necessary.

      The commented program

/usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html differs (HTML document, UTF-8 Unicode text, with very long lines)

      Quasistatic elastic deformation

      Motivation of the model

      In general, time-dependent small elastic deformations are described by the elastic wave equation

\[
   \rho \frac{\partial^2 \mathbf{u}}{\partial t^2}
   + c \frac{\partial \mathbf{u}}{\partial t}
   - \textrm{div}\  ( C \varepsilon(\mathbf{u})) = \mathbf{f}
   \qquad
   \textrm{in}\ \Omega,
\]

where $\mathbf{u}=\mathbf{u} (\mathbf{x},t)$ is the deformation of the body, $\rho$ and $c$ the density and attenuation coefficient, and $\mathbf{f}$ external forces. In addition, initial conditions

\[
   \mathbf{u}(\cdot, 0) = \mathbf{u}_0(\cdot)
   \qquad
   \textrm{on}\ \Omega,
\]

      and Dirichlet (displacement) or Neumann (traction) boundary conditions need to be specified for a unique solution:

\begin{eqnarray*}
   \mathbf{u}(\mathbf{x},t) &=& \mathbf{d}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_D\subset\partial\Omega,
   \\
   \mathbf{n} \ C \varepsilon(\mathbf{u}(\mathbf{x},t)) &=& \mathbf{b}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_N=\partial\Omega\backslash\Gamma_D.
\end{eqnarray*}

In the above formulation, $\varepsilon(\mathbf{u})= \frac 12 (\nabla \mathbf{u} + \nabla \mathbf{u}^T)$ is the symmetric gradient of the displacement, also called the strain. $C$ is a tensor of rank 4, called the stress-strain tensor (the inverse of the compliance tensor) that contains knowledge of the elastic strength of the material; its symmetry properties make sure that it maps symmetric tensors of rank 2 ("matrices" of dimension $d$, where $d$ is the spatial dimensionality) onto symmetric tensors of the same rank. We will comment on the roles of the strain and stress tensors more below. For the moment it suffices to say that we interpret the term $\textrm{div}\  ( C \varepsilon(\mathbf{u}))$ as the vector with components $\frac \partial{\partial x_j} C_{ijkl} \varepsilon(\mathbf{u})_{kl}$, where summation over indices $j,k,l$ is implied.

The quasistatic limit of this equation is motivated as follows: each small perturbation of the body, for example by changes in boundary condition or the forcing function, will result in a corresponding change in the configuration of the body. In general, this will be in the form of waves radiating away from the location of the disturbance. Due to the presence of the damping term, these waves will be attenuated on a time scale of, say, $\tau$. Now, assume that all changes in external forcing happen on time scales that are much larger than $\tau$. In that case, the dynamic nature of the change is unimportant: we can consider the body to always be in static equilibrium, i.e. we can assume that at all times the body satisfies

\begin{eqnarray*}
   - \textrm{div}\  ( C \varepsilon(\mathbf{u})) &=& \mathbf{f}(\mathbf{x},t)
   \qquad
   \textrm{in}\ \Omega,
   \\
   \mathbf{u}(\mathbf{x},t) &=& \mathbf{d}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_D,
   \\
   \mathbf{n} \ C \varepsilon(\mathbf{u}(\mathbf{x},t)) &=& \mathbf{b}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_N.
\end{eqnarray*}

Note that the differential equation does not contain any time derivatives any more – all time dependence is introduced through boundary conditions and a possibly time-varying force function $\mathbf{f}(\mathbf{x},t)$. The changes in configuration can therefore be considered as being stationary instantaneously. An alternative view of this is that $t$ is not really a time variable, but only a time-like parameter that governs the evolution of the problem.

      While these equations are sufficient to describe small deformations, computing large deformations is a little more complicated and, in general, leads to nonlinear equations such as those treated in step-44. In the following, let us consider some of the tools one would employ when simulating problems in which the deformation becomes large.

      Note
      The model we will consider below is not founded on anything that would be mathematically sound: we will consider a model in which we produce a small deformation, deform the physical coordinates of the body by this deformation, and then consider the next loading step again as a linear problem. This isn't consistent, since the assumption of linearity implies that deformations are infinitesimal and so moving around the vertices of our mesh by a finite amount before solving the next linear problem is an inconsistent approach. We should therefore note that it is not surprising that the equations discussed below can't be found in the literature: The model considered here has little to do with reality! On the other hand, the implementation techniques we consider are very much what one would need to use when implementing a real model, as we will see in step-44.

To come back to defining our "artificial" model, let us first introduce a tensorial stress variable $\sigma$, and write the differential equations in terms of the stress:

\begin{eqnarray*}
   - \textrm{div}\  \sigma &=& \mathbf{f}(\mathbf{x},t)
   \qquad
   \textrm{in}\ \Omega(t),
   \\
   \mathbf{u}(\mathbf{x},t) &=& \mathbf{d}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_D\subset\partial\Omega(t),
   \\
   \mathbf{n} \ C \varepsilon(\mathbf{u}(\mathbf{x},t)) &=& \mathbf{b}(\mathbf{x},t)
   \qquad
   \textrm{on}\ \Gamma_N=\partial\Omega(t)\backslash\Gamma_D.
\end{eqnarray*}

Note that these equations are posed on a domain $\Omega(t)$ that changes with time, with the boundary moving according to the displacements $\mathbf{u}(\mathbf{x},t)$ of the points on the boundary. To complete this system, we have to specify the incremental relationship between the stress and the strain, as follows:

\[
   \dot\sigma = C \varepsilon (\dot{\mathbf{u}}),
   \qquad
   \qquad
   \textrm{[stress-strain]}
\]

where a dot indicates a time derivative. Both the stress $\sigma$ and the strain $\varepsilon(\mathbf{u})$ are symmetric tensors of rank 2.

      Time discretization

Numerically, this system is solved as follows: first, we discretize the time component using a backward Euler scheme. This leads to a discrete equilibrium of force at time step $n$:

\[
   -\textrm{div}\  \sigma^n = f^n,
\]

      where

\[
   \sigma^n = \sigma^{n-1} + C \varepsilon (\Delta \mathbf{u}^n),
\]

and $\Delta \mathbf{u}^n$ the incremental displacement for time step $n$. In addition, we have to specify initial data $\mathbf{u}(\cdot,0)=\mathbf{u}_0$. This way, if we want to solve for the displacement increment, we have to solve the following system:

\begin{align*}
   - \textrm{div}\   C \varepsilon(\Delta\mathbf{u}^n) &= \mathbf{f} + \textrm{div}\  \sigma^{n-1}
   \qquad
   &&\textrm{in}\ \Omega(t_{n-1}),
   \\
   \Delta \mathbf{u}^n(\mathbf{x},t) &= \mathbf{d}(\mathbf{x},t_n) - \mathbf{d}(\mathbf{x},t_{n-1})
   \qquad
   &&\textrm{on}\ \Gamma_D,
   \\
   \mathbf{n} \ C \varepsilon(\Delta \mathbf{u}^n(\mathbf{x},t)) &= \mathbf{b}(\mathbf{x},t_n)-\mathbf{b}(\mathbf{x},t_{n-1})
   \qquad
   &&\textrm{on}\ \Gamma_N=\partial\Omega(t_{n-1})\backslash\Gamma_D.
\end{align*}

The weak form of this set of equations, which as usual is the basis for the finite element formulation, reads as follows: find $\Delta \mathbf{u}^n \in \{v\in H^1(\Omega(t_{n-1}))^d: v|_{\Gamma_D}=\mathbf{d}(\cdot,t_n) - \mathbf{d}(\cdot,t_{n-1})\}$ such that

\begin{align*}
   (C \varepsilon(\Delta\mathbf{u}^n), \varepsilon(\varphi) )_{\Omega(t_{n-1})}
   &=
   (\mathbf{f}, \varphi)_{\Omega(t_{n-1})}
   -(\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}
   \\
   &\qquad
   +(\mathbf{b}(\mathbf{x},t_n)-\mathbf{b}(\mathbf{x},t_{n-1}), \varphi)_{\Gamma_N}
   +(\sigma^{n-1}\,\mathbf{n}, \varphi)_{\Gamma_N}
   \\
   &\qquad\qquad
   \forall \varphi \in \{\mathbf{v}\in H^1(\Omega(t_{n-1}))^d: \mathbf{v}|_{\Gamma_D}=0\}.
\end{align*}

Using that $\sigma^{n-1} \mathbf{n}
            = [C \varepsilon(\mathbf{u}^{n-1})] \mathbf{n}
            = \mathbf{b}(\mathbf x, t_{n-1})$, these equations can be simplified to

\begin{align*}
   (C \varepsilon(\Delta\mathbf{u}^n), \varepsilon(\varphi) )_{\Omega(t_{n-1})}
   &=
   (\mathbf{f}, \varphi)_{\Omega(t_{n-1})}
   -(\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}
   +(\mathbf{b}(\mathbf{x},t_n),\varphi)_{\Gamma_N}
   \\
   &\qquad\qquad
   \forall \varphi \in \{\mathbf{v}\in H^1(\Omega(t_{n-1}))^d: \mathbf{v}|_{\Gamma_D}=0\}.
   \qquad
   \qquad
   \textrm{[linear-system]}
\end{align*}

We note that, for simplicity, in the program we will always assume that there are no boundary forces, i.e. $\mathbf{b} = 0$, and that the deformation of the body is driven by body forces $\mathbf{f}$ and prescribed boundary displacements $\mathbf{d}$ alone. It is also worth noting that when integrating by parts, we would get terms of the form $(C \varepsilon(\Delta\mathbf{u}^n), \nabla \varphi)_{\Omega(t_{n-1})}$, but that we replace them with the term involving the symmetric gradient $\varepsilon(\varphi)$ instead of $\nabla\varphi$. Due to the symmetry of $C$, the two terms are mathematically equivalent, but the symmetric version avoids the potential for round-off errors making the resulting matrix slightly non-symmetric.

The system at time step $n$, to be solved on the old domain $\Omega(t_{n-1})$, has exactly the form of a stationary elastic problem, and is therefore similar to what we have already implemented in previous example programs. We will therefore not comment on the space discretization beyond saying that we again use lowest order continuous finite elements.

      There are differences, however:

      1. We have to move (update) the mesh after each time step, in order to be able to solve the next time step on a new domain;

2. We need to know $\sigma^{n-1}$ to compute the next incremental displacement, i.e. we need to compute it at the end of the time step to make sure it is available for the next time step. Essentially, the stress variable is our window to the history of deformation of the body.

      These two operations are done in the functions move_mesh and update_quadrature_point_history in the program. While moving the mesh is only a technicality, updating the stress is a little more complicated and will be discussed in the next section.

      Updating the stress variable

As indicated above, we need to have the stress variable $\sigma^n$ available when computing time step $n+1$, and we can compute it using

\[
   \sigma^n = \sigma^{n-1} + C \varepsilon (\Delta \mathbf{u}^n).
   \qquad
   \qquad
   \textrm{[stress-update]}
\]

There are, despite the apparent simplicity of this equation, two questions that we need to discuss. The first concerns the way we store $\sigma^n$: even if we compute the incremental updates $\Delta\mathbf{u}^n$ using lowest-order finite elements, then its symmetric gradient $\varepsilon(\Delta\mathbf{u}^n)$ is in general still a function that is not easy to describe. In particular, it is not a piecewise constant function, and on general meshes (with cells that are not rectangles parallel to the coordinate axes) or with non-constant stress-strain tensors $C$ it is not even a bi- or trilinear function. Thus, it is a priori not clear how to store $\sigma^n$ in a computer program.

To decide this, we have to see where it is used. The only place where we require the stress is in the term $(\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}$. In practice, we of course replace this term by numerical quadrature:

\[
   (\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}
   =
   \sum_{K\subset {T}}
   \int_K \sigma^{n-1} : \varepsilon(\varphi)\; dx
   \approx
   \sum_{K\subset {T}}
   \sum_q
   w_q \ \sigma^{n-1}(\mathbf{x}_q) : \varepsilon(\varphi(\mathbf{x}_q)),
\]


      where $w_q$ are the quadrature weights and $\mathbf{x}_q$ the quadrature points on cell $K$. This should make clear that what we really need is not the stress $\sigma^{n-1}$ in itself, but only the values of the stress in the quadrature points on all cells. This, however, is a simpler task: we only have to provide a data structure that is able to hold one symmetric tensor of rank 2 for each quadrature point on all cells (or, since we compute in parallel, all quadrature points of all cells that the present MPI process “owns”). At the end of each time step we then only have to evaluate $\varepsilon(\Delta \mathbf{u}^n(\mathbf{x}_q))$, multiply it by the stress-strain tensor $C$, and use the result to update the stress $\sigma^n(\mathbf{x}_q)$ at quadrature point $q$.
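In the program, this idea takes the form of a small history structure stored for each quadrature point, plus a short update loop per cell at the end of each time step. A sketch, in which PointHistory, local_quadrature_points_history, and incremental_strain are illustrative stand-ins for whatever the program actually calls these objects:

template <int dim>
struct PointHistory
{
  SymmetricTensor<2, dim> old_stress; // sigma^{n-1} at one quadrature point
};

// At the end of time step n, on each locally owned cell:
//   sigma^n(x_q) = sigma^{n-1}(x_q) + C : eps(Delta u^n)(x_q)
for (unsigned int q = 0; q < quadrature_formula.size(); ++q)
  local_quadrature_points_history[q].old_stress +=
    stress_strain_tensor * incremental_strain[q];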


The second complication is not visible in our notation as chosen above. It is due to the fact that we compute $\Delta u^n$ on the domain $\Omega(t_{n-1})$, and then use this displacement increment to both update the stress as well as move the mesh nodes around to get to $\Omega(t_n)$ on which the next increment is computed. What we have to make sure, in this context, is that moving the mesh does not only involve moving around the nodes, but also making corresponding changes to the stress variable: the updated stress is a variable that is defined with respect to the coordinate system of the material in the old domain, and has to be transferred to the new domain. The reason for this can be understood as follows: locally, the incremental deformation $\Delta\mathbf{u}$ can be decomposed into three parts, a linear translation (the constant part of the displacement increment field in the neighborhood of a point), a dilational component (that part of the gradient of the displacement field that has a nonzero divergence), and a rotation. A linear translation of the material does not affect the stresses that are frozen into it – the stress values are simply translated along. The dilational or compressional change produces a corresponding stress update. However, the rotational component does not necessarily induce a nonzero stress update (think, in 2d, for example of the situation where $\Delta\mathbf{u}=(y, -x)^T$, with which $\varepsilon(\Delta\mathbf{u})=0$). Nevertheless, if the material was prestressed in a certain direction, then this direction will be rotated along with the material. To this end, we have to define a rotation matrix $R(\Delta \mathbf{u}^n)$ that describes, at each point, the rotation due to the displacement increments. It is not hard to see that the actual dependence of $R$ on $\Delta \mathbf{u}^n$ can only be through the curl of the displacement, rather than the displacement itself or its full gradient (as mentioned above, the constant components of the increment describe translations, its divergence the dilational modes, and the curl the rotational modes). Since the exact form of $R$ is cumbersome, we only state it in the program code, and note that the correct updating formula for the stress variable is then

\[
   \sigma^n
   =
   R(\Delta \mathbf{u}^n)^T
   \left[\sigma^{n-1} + C \varepsilon (\Delta \mathbf{u}^n)\right]
   R(\Delta \mathbf{u}^n).
\]

      /usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html 2024-12-27 18:25:18.580940866 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html 2024-12-27 18:25:18.584940893 +0000 @@ -161,135 +161,135 @@

      The finite element method in general, and deal.II in particular, were invented to solve partial differential equations – in other words, to solve continuum mechanics problems. On the other hand, sometimes one wants to solve problems in which it is useful to track individual objects ("particles") and how their positions evolve. If this simply leads to a set of ordinary differential equations, for example if you want to track the positions of the planets in the solar system over time, then deal.II is clearly not the right tool. On the other hand, if this evolution is due to the interaction with the solution of partial differential equations, or if having a mesh to determine which particles interact with others (such as in the smoothed particle hydrodynamics (SPH) method), then deal.II has support for you.

      The case we will consider here is how electrically charged particles move through an electric field. As motivation, we will consider cathode rays: Electrons emitted by a heated piece of metal that is negatively charged (the "cathode"), and that are then accelerated by an electric field towards the positively charged electrode (the "anode"). The anode is typically ring-shaped so that the majority of electrons can fly through the hole in the form of an electron beam. In the olden times, they might then have illuminated the screen of a TV built from a cathode ray tube. Today, instead, electron beams are useful in X-ray machines, electron beam lithography, electron beam welding, and a number of other areas.

The equations we will then consider are as follows: First, we need to describe the electric field. This is most easily accomplished by noting that the electric potential $V$ satisfies the equation

\[
   -\epsilon_0 \Delta V = \rho
\]


      where $\epsilon_0$ is the dielectric constant of vacuum, and $\rho$ is the charge density. This is augmented by boundary conditions that we will choose as follows:

\begin{align*}
   V &= -V_0 && \text{on}\; \Gamma_\text{cathode}\subset\partial\Omega \\
   V &= +V_0 && \text{on}\; \Gamma_\text{anode}\subset\partial\Omega \\
   \epsilon\frac{\partial V}{\partial n} &= 0
    && \text{on}\; \partial\Omega\setminus\Gamma_\text{cathode}\setminus\Gamma_\text{anode}.
\end{align*}


      In other words, we prescribe voltages $+V_0$ and $-V_0$ at the two electrodes and insulating (Neumann) boundary conditions elsewhere. Since the dynamics of the particles are purely due to the electric field $\mathbf E=\nabla V$, we could as well have prescribed $2V_0$ and $0$ at the two electrodes – all that matters is the voltage difference at the two electrodes.


      Given this electric potential $V$ and the electric field $\mathbf E=\nabla V$, we can describe the trajectory of the $i$th particle using the differential equation

\[
   m {\ddot {\mathbf x}}_i = e\mathbf E,
\]


      where $m,e$ are the mass and electric charge of each particle. In practice, it is convenient to write this as a system of first-order differential equations in the position $\mathbf x$ and velocity $\mathbf v$:

\begin{align*}
   {\dot {\mathbf v}}_i &= \frac{e\mathbf E}{m}, \\
   {\dot {\mathbf x}}_i &= {\mathbf v}_i.
\end{align*}


      The deal.II class we will use to deal with particles, Particles::ParticleHandler, stores particles in a way so that the position $\mathbf x_i$ is part of the Particles::ParticleHandler data structures. (It stores particles sorted by cell they are in, and consequently needs to know where each particle is.) The velocity $\mathbf v_i$, on the other hand, is of no concern to Particles::ParticleHandler and consequently we will store it as a "property" of each particle that we will update in each time step. Properties can also be used to store any other quantity we might care about each particle: its charge, or if they were larger than just an electron, its color, mass, attitude in space, chemical composition, etc.
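As a sketch of what this looks like in code: Particles::ParticleHandler can be told at construction time how many scalar properties to allocate per particle, and each particle then exposes these numbers as an array. (The loop below uses the actual ParticleHandler interface; the surrounding variables are assumed context.)

// dim properties per particle, used here to store the velocity:
Particles::ParticleHandler<dim> particle_handler(triangulation,
                                                 mapping,
                                                 /*n_properties=*/dim);

for (auto &particle : particle_handler)
  {
    const ArrayView<double> properties = particle.get_properties();
    Tensor<1, dim> velocity;
    for (unsigned int d = 0; d < dim; ++d)
      velocity[d] = properties[d];
    // ... update the velocity from the field, then write it back ...
  }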


      There remain two things to discuss to complete the model: Where particles start and what the charge density $\rho$ is.


      First, historically, cathode rays used very large electric fields to pull electrons out of the metal. This produces only a relatively small current. One can do better by heating the cathode: a statistical fraction of electrons in that case have enough thermal energy to leave the metal; the electric field then just has to be strong enough to pull them away from the attraction of their host body. We will model this in the following way: We will create a new particle if (i) the electric field points away from the electrode, i.e., if $\mathbf E \cdot \mathbf n < 0$ where $\mathbf n$ is the normal vector at a face pointing out of the domain (into the electrode), and (ii) the electric field exceeds a threshold value $|\mathbf E|\ge E_\text{threshold}$. This is surely not a sufficiently accurate model for what really happens, but is good enough for our current tutorial program.
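In code, this criterion amounts to a simple check at the quadrature points of faces on the cathode boundary. A sketch, assuming the electric field has already been evaluated there (electric_field_values and E_threshold are assumed names):

for (unsigned int q = 0; q < n_face_q_points; ++q)
  {
    const Tensor<1, dim> E = electric_field_values[q]; // grad V at x_q
    const Tensor<1, dim> n = fe_face_values.normal_vector(q);

    // (i) the field points away from the electrode (E.n < 0 with n
    // the outward normal), and (ii) the field is strong enough:
    if ((E * n < 0) && (E.norm() >= E_threshold))
      {
        // ...create a new particle in the vicinity of x_q...
      }
  }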


      Second, in principle we would have to model the charge density via

\[
   \rho(\mathbf x) = \sum_i e\delta(\mathbf x-\mathbf x_i).
\]


The issue now is that in reality, a cathode ray tube in an old television yields a current of somewhere around a few milli-Amperes. In the much higher energy beams of particle accelerators, the current may only be a few nano-Amperes. But an Ampere is $6\times 10^{18}$ electrons flowing per second. Now, as you will see in the results section, we really only simulate a few microseconds ($10^{-6}$ seconds), but that still results in very, very large numbers of electrons – far more than we can hope to simulate with a program as small as the current one. As a consequence, let us presume that each particle represents $N$ electrons. Then the particle mass and charge are also $Nm$ and $Ne$ and the equations we have to solve are

\[
   (Nm) {\ddot {\mathbf x}}_i = (Ne)\mathbf E,
\]


      which is of course exactly the same as above after dividing both sides by $N$. On the other hand, the charge density for these "clumps" of electrons is given by

\[
   \rho(\mathbf x) = \sum_i (Ne)\delta(\mathbf x-\mathbf x_i).
\]


      It is this form that we will implement in the program, where $N$ is chosen rather large in the program to ensure that the particles actually affect the electric field. (This may not be realistic in practice: In most cases, there are just not enough electrons to actually affect the overall electric field. But realism is not our goal here.)


As a final thought about the model, one may wonder why the equation for the electric field (or, rather, the electric potential) has no time derivative whereas the equations for the electron positions do. In essence, this is a modeling assumption: We assume that the particles move so slowly that at any given time the electric field is in equilibrium. This is saying, in other words, that the velocity of the electrons is much less than the speed of light. In yet other words, we can rephrase this in terms of the electrode voltage $V_0$: Since every volt of electric potential accelerates electrons by approximately 600 km/s (neglecting relativistic effects), requiring $\|\mathbf v_i\|\ll c$ is equivalent to saying that $2V_0 \ll 500 \text{V}$. Under this assumption (and the assumption that the total number of electrons is small), one can also neglect the creation of magnetic fields by the moving charges, which would otherwise also affect the movement of the electrons.


      Time discretization

      The equations outlined above then form a set of coupled differential equations. Let us bring them all together in one place again to make that clear:

\begin{align*}
   -\epsilon_0 \Delta V &= \sum_i e\delta(\mathbf x-\mathbf x_i)
   \\
   {\dot {\mathbf x}}_i &= {\mathbf v}_i,
   \\
   {\dot {\mathbf v}}_i &= \frac{e\mathbf E}{m} = \frac{e\nabla V}{m}.
\end{align*}

      Because of the awkward dependence of the electric potential on the particle locations, we don't want to solve this as a coupled system but instead use a decoupled approach where we first solve for the potential in each time step and then the particle locations. (One could also do it the other way around, of course.) This is very much in the same spirit as we do in step-21, step-31, and step-32, to name just a few, and can all be understood in the context of the operator splitting methods discussed in step-58.


      So, if we denote by an upper index $n$ the time step, and if we use a simple time discretization for the ODE, then this means that we have to solve the following set of equations in each time step:

\begin{align*}
   -\epsilon_0 \Delta V^{(n)} &= \sum_i e\delta(\mathbf x-\mathbf x_i^{(n-1)})
   \\
   \frac{{\mathbf v}_i^{(n)}-{\mathbf v}_i^{(n-1)}}{\Delta t} &= \frac{e\nabla V^{(n)}}{m}
   \\
   \frac{{\mathbf x}_i^{(n)}-{\mathbf x}_i^{(n-1)}}{\Delta t} &= {\mathbf v}_i^{(n)}.
\end{align*}


      This scheme can be understood in the framework of operator splitting methods (specifically, the "Lie splitting" method) wherein a coupled system is solved by updating one variable at a time, using either the old values of other variables (e.g., using $\mathbf x_i^{(n-1)}$ in the first equation) or the values of variables that have already been updated in a previous sub-step (e.g., using $V^{(n)}$ in the second equation). There are of course many better ways to do a time discretization (for example the simple leapfrog scheme when updating the velocity, or more general Strang splitting methods for the coupled system) but this isn't the point of the tutorial program, and so we will be content with what we have here. (We will comment on a piece of this puzzle in the possibilities for extensions section of this program, however.)
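In compact pseudocode, one time step of this splitting scheme then reads as follows (all function names are placeholders for the corresponding pieces of the program):

while (time < end_time)
  {
    update_timestep_size();       // choose dt; see the discussion below
    solve_field();                // -eps_0 Laplace V^(n) = rho(x^(n-1))
    update_particle_velocities(); // v^(n) = v^(n-1) + dt e grad V^(n) / m
    move_particles();             // x^(n) = x^(n-1) + dt v^(n)
    time += dt;
  }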


      There remains the question of how we should choose the time step size $\Delta t$. The limitation here is that the Particles::ParticleHandler class needs to keep track of which cell each particle is in. This is particularly an issue if we are running computations in parallel (say, in step-70) because in that case every process only stores those cells it owns plus one layer of "ghost cells". That's not relevant here, but in general we should make sure that over the course of each time step, a particle moves only from one cell to any of its immediate neighbors (face, edge, or vertex neighbors). If we can ensure that, then Particles::ParticleHandler is guaranteed to be able to figure out which cell a particle ends up in. To do this, a useful rule of thumb is that we should choose the time step so that for all particles the expected distance the particle moves by is less than one cell diameter:

\[
   \Delta t \le \frac{h_i}{\|\mathbf v_i\|} \qquad\qquad \forall i,
\]

      or equivalently

\[
   \Delta t \le \min_i \frac{h_i}{\|\mathbf v_i\|}.
\]


      Here, $h_i$ is the length of the shortest edge of the cell on which particle $i$ is located – in essence, a measure of the size of a cell.


      On the other hand, a particle might already be at the boundary of one cell and the neighboring cell might be once further refined. So then the time to cross that neighboring cell would actually be half the amount above, suggesting

\[
   \Delta t \le \min_i \frac{\tfrac 12 h_i}{\|\mathbf v_i\|}.
\]

But even that is not good enough: The formula above updates the particle positions in each time step using the formula

\[
 \frac{{\mathbf x}_i^{(n)}-{\mathbf x}_i^{(n-1)}}{\Delta t} = {\mathbf v}_i^{(n)},
\]


that is, using the current velocity ${\mathbf v}_i^{(n)}$. But we don't have the current velocity yet at the time when we need to choose $\Delta t$ – which is after we have updated the potential $V^{(n)}$ but before we update the velocity from ${\mathbf v}_i^{(n-1)}$ to ${\mathbf v}_i^{(n)}$. All we have is ${\mathbf v}_i^{(n-1)}$. So we need an additional safety factor for our final choice:

\[
   \Delta t^{(n)} =
   c_\text{safety} \min_i \frac{\tfrac 12 h_i}{\|\mathbf v_i^{(n-1)}\|}.
\]


How large should $c_\text{safety}$ be? That depends on how much of an underestimate $\|\mathbf v_i^{(n-1)}\|$ might be compared to $\|\mathbf v_i^{(n)}\|$, and that is actually quite easy to assess: A particle created in one time step with zero velocity will roughly pick up equal velocity increments in each successive time step if the electric field it encounters along the way is roughly constant. So the maximal difference between $\|\mathbf v_i^{(n-1)}\|$ and $\|\mathbf v_i^{(n)}\|$ would be a factor of two. As a consequence, we will choose $c_\text{safety}=0.5$.
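Putting the pieces together, the time step computation then requires only a few lines. In the sketch below, cell->minimum_vertex_distance() is an actual deal.II function that measures the cell size, and Particles::ParticleHandler::particles_in_cell() is the actual interface for iterating over the particles of one cell; particle_speed() stands in for reading $\|\mathbf v_i^{(n-1)}\|$ from the particle's properties:

const double c_safety = 0.5;
double       dt       = std::numeric_limits<double>::max();

for (const auto &cell : dof_handler.active_cell_iterators())
  for (const auto &particle : particle_handler.particles_in_cell(cell))
    {
      const double v = particle_speed(particle); // |v_i^(n-1)|
      if (v > 0)
        dt = std::min(dt,
                      c_safety * 0.5 * cell->minimum_vertex_distance() / v);
    }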


There is only one other case we ought to consider: What happens in the very first time step? There, any particles to be moved along have just been created, but they have a zero velocity. So we don't know what velocity we should choose for them. Of course, in all other time steps there are also particles that have just been created, but in general, the particles with the highest velocity limit the time step size and so the newly created particles with their zero velocity don't matter. But what if we only have such particles?


      In that case, we can use the following approximation: If a particle starts at $\mathbf v^{(0)}=0$, then the update formula tells us that

\[
   {\mathbf v}_i^{(1)} = \frac{e\nabla V^{(1)}}{m} \Delta t,
\]

      and consequently

\[
     \frac{{\mathbf x}_i^{(1)}-{\mathbf x}_i^{(0)}}{\Delta t} = {\mathbf v}_i^{(1)},
\]

      which we can write as

\[
     {\mathbf x}_i^{(1)} - {\mathbf x}_i^{(0)} = \frac{e\nabla V^{(1)}}{m} \Delta t^2.
\]


      Not wanting to move a particle by more than $\frac 12 h_i$ then implies that we should choose the time step as

\[
   \Delta t
   \le
   \min_i
   \sqrt{ \frac{h_i m}{e \|\nabla V^{(1)}\|} }.
\]

      Using the same argument about neighboring cells possibly being smaller by a factor of two then leads to the final formula for time step zero:

\[
   \Delta t
   =
   \min_i
   \sqrt{ \frac{\frac 12 h_i m}{e \|\nabla V^{(1)}\|} }.
\]


      Strictly speaking, we would have to evaluate the electric potential $V^{(1)}$ at the location of each particle, but a good enough approximation is to use the maximum of the values at the vertices of the respective cell. (Why the vertices and not the midpoint? Because the gradient of the solution of the Laplace equation, i.e., the electric field, is largest in corner singularities which are located at the vertices of cells.) This has the advantage that we can make good use of the FEValues functionality which can recycle pre-computed material as long as the quadrature points are the same from one cell to the next.


      We could always run this kind of scheme to estimate the difference between $\mathbf v_i^{(n-1)}$ and $\mathbf v_i^{(n)}$, but it relies on evaluating the electric field $\mathbf E$ on each cell, and that is expensive. As a consequence, we will limit this approach to the very first time step.


      Spatial discretization


      Having discussed the time discretization, the discussion of the spatial discretization is going to be short: We use quadratic finite elements, i.e., the space $Q_2$, to approximate the electric potential $V$. The mesh is adapted a couple of times during the initial time step. All of this is entirely standard if you have read step-6, and the implementation does not provide for any kind of surprise.


      Dealing with particles programmatically

      Adding and moving particles is, in practice, not very difficult in deal.II. To add one, the create_particles() function of this program simply uses a code snippet of the following form:

Particles::Particle<dim> new_particle;
new_particle.set_location(location);
// The names reference_location and next_unused_particle_id are
// stand-ins here; the diff elides the rest of the original snippet.
new_particle.set_reference_location(reference_location);
new_particle.set_id(next_unused_particle_id);
      /usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html 2024-12-27 18:25:18.628941196 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html 2024-12-27 18:25:18.628941196 +0000 @@ -132,14 +132,14 @@

    Introduction

    Note
    The material presented here is also discussed in video lecture 9. (All video lectures are also available here.)

    The finite element method is based on approximating the solution $u$ of a differential equation such as $-\Delta u=f$ by a function $u_h$ that is "piecewise" polynomial; that is, we subdivide the domain $\Omega$ on which the equation is posed into small cells that in the documentation we will generally denote by the symbol $K$. On each cell $K$, the approximating function $u_h$ we seek is then a polynomial. (Or, strictly speaking, a function that is the image of a polynomial from a "reference cell", but let us not make things more complicated than necessary for now.)


    In the previous tutorial program (in step-1), we showed how we should think of the subdivision of the domain into cells as a "mesh" represented by the Triangulation class, and how this looks like in code. In the current tutorial program, we now show how one represents piecewise polynomial functions through the concept of degrees of freedom defined on this mesh. For this example, we will use the lowest order ( $Q_1$) finite elements, that is the approximating function $u_h$ we are looking for will be "bi-linear" on each quadrilateral cell $K$ of the mesh. (They would be linear if we would work on triangles.)


    In practice, we represent the function as a linear combination of shape functions $\varphi_j(\mathbf x)$ with multipliers $U_j$ that we call the "degrees of freedom". For the bi-linear functions we consider here, each of these shape functions and degrees of freedom is associated with a vertex of the mesh. Later examples will demonstrate higher order elements where degrees of freedom are not necessarily associated with vertices any more, but can be associated with edges, faces, or cells.


    The term "degree of freedom" is commonly used in the finite element community to indicate two slightly different, but related things. The first is that we'd like to represent the finite element solution as a linear combination of shape functions, in the form $u_h(\mathbf x) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf
-x)$. Here, $U_j$ is a vector of expansion coefficients. Because we don't know their values yet (we will compute them as the solution of a linear or nonlinear system), they are called "unknowns" or "degrees of freedom". The second meaning of the term can be explained as follows: A mathematical description of finite element problems is often to say that we are looking for a finite dimensional function $u_h \in V_h$ that satisfies some set of equations (e.g. $a(u_h,\varphi_h)=(f,\varphi_h)$ for all test functions $\varphi_h\in
-V_h$). In other words, all we say here is that the solution needs to lie in some space $V_h$. However, to actually solve this problem on a computer we need to choose a basis of this space; this is the set of shape functions $\varphi_j(\mathbf x)$ we have used above in the expansion of $u_h(\mathbf x)$ with coefficients $U_j$. There are of course many bases of the space $V_h$, but we will specifically choose the one that is described by the finite element functions that are traditionally defined locally on the cells of the mesh.


    Enumerating degrees of freedom


    Describing "degrees of freedom" in this context requires us to simply enumerate the basis functions of the space $V_h$. For $Q_1$ elements this means simply enumerating the vertices of the mesh in some way, but for higher order elements, one also has to enumerate the shape functions that are associated with edges, faces, or cell interiors of the mesh. In other words, the enumeration of degrees of freedom is an entirely separate thing from the indices we use for vertices. The class that provides this enumeration of the basis functions of $V_h$ is called DoFHandler.


    Defining degrees of freedom ("DoF"s in short) on a mesh is, in practice, a rather simple task, since the library does all the work for you. Essentially, all you have to do is create a finite element object (from one of the many finite element classes deal.II already has, see for example the Finite element space descriptions documentation) and give it to a DoFHandler object through the DoFHandler::distribute_dofs() function ("distributing DoFs" is the term we use to describe the process of enumerating the basis functions as discussed above). The DoFHandler is a class that knows which degrees of freedom live where, i.e., it can answer questions like "how many degrees of freedom are there globally" and "on this cell, give me the global indices of the shape functions that live here". This is the sort of information you need when determining how big your system matrix should be, and when copying the contributions of a single cell into the global matrix.
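Spelled out, this takes only a handful of lines; a minimal sketch for the $Q_1$ case of this program:

FE_Q<2>       fe(1); // lowest order: one shape function per vertex
DoFHandler<2> dof_handler(triangulation);

dof_handler.distribute_dofs(fe); // enumerate the basis functions of V_h

std::cout << "Number of degrees of freedom: " << dof_handler.n_dofs()
          << std::endl;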

    The first task of the current program is therefore to take a mesh and a finite element, and enumerate the degrees of freedom. In the current context, this means simply giving each vertex of the mesh a DoF index. Once that has happened, we will output in a picture which vertex ended up with which DoF index. You can find the corresponding pictures in the results section of this tutorial.


    The next step would then be to compute a matrix and right hand side corresponding to a particular differential equation using this finite element and mesh. We will keep this step for the step-3 program and rather talk about one practical aspect of a finite element program, namely that finite element matrices are always very sparse: almost all entries in these matrices are zero.

    To be more precise, we say that a matrix is sparse if the number of nonzero entries per row in the matrix is bounded by a number that is independent of the overall number of degrees of freedom. For example, the simple 5-point stencil of a finite difference approximation of the Laplace equation leads to a sparse matrix since the number of nonzero entries per row is five, and therefore independent of the total size of the matrix. For more complicated problems – say, the Stokes problem of step-22 – and in particular in 3d, the number of entries per row may be several hundred. But the important point is that this number is independent of the overall size of the problem: If you refine the mesh, the maximal number of unknowns per row remains the same.

    Sparsity is one of the distinguishing features of the finite element method compared to, say, approximating the solution of a partial differential equation using a Taylor expansion and matching coefficients, or using a Fourier basis.


    In practical terms, it is the sparsity of matrices that enables us to solve problems with millions or billions of unknowns. To understand this, note that a matrix with $N$ rows, each with a fixed upper bound for the number of nonzero entries, requires ${\cal O}(N)$ memory locations for storage, and a matrix-vector multiplication also requires only ${\cal O}(N)$ operations. Consequently, if we had a linear solver that requires only a fixed number of matrix-vector multiplications to come up with the solution of a linear system with this matrix, then we would have a solver that can find the values of all $N$ unknowns with optimal complexity, i.e., with a total of ${\cal O}(N)$ operations. It is clear that this wouldn't be possible if the matrix were not sparse (because then the number of entries in the matrix would have to be ${\cal O}(N^s)$ with some $s>1$, and doing a fixed number of matrix-vector products would take ${\cal O}(N^s)$ operations), but it also requires very specialized solvers such as multigrid methods to satisfy the requirement that the solution requires only a fixed number of matrix-vector multiplications. We will frequently look at the question of what solver to use in the remaining programs of this tutorial.


    The sparsity is generated by the fact that finite element shape functions are defined locally on individual cells, rather than globally, and that the local differential operators in the bilinear form only couple shape functions whose support overlaps. (The "support" of a function is the area where it is nonzero. For the finite element method, the support of a shape function is generally the cells adjacent to the vertex, edge, or face it is defined on.) In other words, degrees of freedom $i$ and $j$ that are not defined on the same cell do not overlap, and consequently the matrix entry $A_{ij}$ will be zero. (In some cases such as the Discontinuous Galerkin method, shape functions may also connect to neighboring cells through face integrals. But finite element methods do not generally couple shape functions beyond the immediate neighbors of a cell on which the function is defined.)


    How degrees of freedom are enumerated

    By default, the DoFHandler class enumerates degrees of freedom on a mesh using an algorithm that is difficult to describe and leads to results that do look right if you know what it is doing but otherwise appears rather random; consequently, the sparsity pattern is also not optimized for any particular purpose. To show this, the code below will demonstrate a simple way to output the "sparsity pattern" that corresponds to a DoFHandler, i.e., an object that represents all of the potentially nonzero elements of a matrix one may build when discretizing a partial differential equation on a mesh and its DoFHandler. This lack of structure in the sparsity pattern will be apparent from the pictures we show below.
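The usual way to obtain such a picture is to let DoFTools build the sparsity pattern and then write it to a file; a sketch along these lines (the output file name is of course arbitrary):

DynamicSparsityPattern dynamic_sparsity_pattern(dof_handler.n_dofs(),
                                                dof_handler.n_dofs());
DoFTools::make_sparsity_pattern(dof_handler, dynamic_sparsity_pattern);

SparsityPattern sparsity_pattern;
sparsity_pattern.copy_from(dynamic_sparsity_pattern);

// Every potentially nonzero matrix entry becomes one dot in the picture:
std::ofstream out("sparsity_pattern.svg");
sparsity_pattern.print_svg(out);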


    For most applications and algorithms, the exact way in which degrees of freedom are numbered does not matter. For example, the Conjugate Gradient method we use to solve linear systems does not care. On the other hand, some algorithms do care: in particular, some preconditioners such as SSOR will work better if they can walk through degrees of freedom in a particular order, and it would be nice if we could just sort them in such a way that SSOR can iterate through them from zero to $N$ in this order. Other examples include computing incomplete LU or Cholesky factorizations, or if we care about the block structure of matrices (see step-20 for an example). deal.II therefore has algorithms that can re-enumerate degrees of freedom in particular ways in namespace DoFRenumbering. Renumbering can be thought of as choosing a different, permuted basis of the finite element space. The sparsity pattern and matrices that result from this renumbering are therefore also simply a permutation of rows and columns compared to the ones we would get without explicit renumbering.


    In the program below, we will use the algorithm of Cuthill and McKee to do so. We will show the sparsity pattern for both the original enumeration of degrees of freedom and of the renumbered version below, in the results section.

    The commented program

    The first few includes are just like in the previous program, so do not require additional comments:

    @@ -288,7 +288,7 @@
     

    Renumbering of DoFs

    In the sparsity pattern produced above, the nonzero entries extended quite far off from the diagonal. For some algorithms, for example for incomplete LU decompositions or Gauss-Seidel preconditioners, this is unfavorable, and we will show a simple way how to improve this situation.


    Remember that for an entry $(i,j)$ in the matrix to be nonzero, the supports of the shape functions i and j needed to intersect (otherwise in the integral, the integrand would be zero everywhere since either the one or the other shape function is zero at some point). However, the supports of shape functions intersected only if they were adjacent to each other, so in order to have the nonzero entries clustered around the diagonal (where $i$ equals $j$), we would like to have adjacent shape functions to be numbered with indices (DoF numbers) that differ not too much.


    This can be accomplished by a simple front marching algorithm, where one starts at a given vertex and gives it the index zero. Then, its neighbors are numbered successively, making their indices close to the original one. Then, their neighbors, if not yet numbered, are numbered, and so on.
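Written over the adjacency graph of the degrees of freedom, this front marching is nothing but a breadth-first traversal; the following is a sketch of the idea, not of the library's implementation:

// neighbors[i] lists the DoFs whose shape functions' supports
// intersect the support of DoF i.
std::vector<unsigned int> new_index(n_dofs, numbers::invalid_unsigned_int);
std::queue<unsigned int>  front;

new_index[starting_dof] = 0;
front.push(starting_dof);
unsigned int next_free_index = 1;

while (!front.empty())
  {
    const unsigned int dof = front.front();
    front.pop();
    for (const unsigned int neighbor : neighbors[dof])
      if (new_index[neighbor] == numbers::invalid_unsigned_int)
        {
          new_index[neighbor] = next_free_index++;
          front.push(neighbor);
        }
  }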

    One algorithm that adds a little bit of sophistication along these lines is the one by Cuthill and McKee. We will use it in the following function to renumber the degrees of freedom such that the resulting sparsity pattern is more localized around the diagonal. The only interesting part of the function is the first call to DoFRenumbering::Cuthill_McKee, the rest is essentially as before:

      void renumber_dofs(DoFHandler<2> &dof_handler)
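  {
    // This is the call mentioned above that actually renumbers the DoFs;
    // the rest of the function body (sketched here from context, not
    // reproduced verbatim) repeats the sparsity pattern output from before:
    DoFRenumbering::Cuthill_McKee(dof_handler);

    DynamicSparsityPattern dynamic_sparsity_pattern(dof_handler.n_dofs(),
                                                    dof_handler.n_dofs());
    DoFTools::make_sparsity_pattern(dof_handler, dynamic_sparsity_pattern);
    // ...copy into a SparsityPattern and write it out as before...
  }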
/usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html 2024-12-27 18:25:18.700941690 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html 2024-12-27 18:25:18.704941717 +0000 @@ -167,13 +167,13 @@

\begin{eqnarray*}
   -\nabla \cdot K({\mathbf x}) \nabla p &=& f \qquad {\textrm{in}\ }\Omega, \\
   p &=& g \qquad {\textrm{on}\ }\partial\Omega.
\end{eqnarray*}


    $K({\mathbf x})$ is assumed to be uniformly positive definite, i.e., there is $\alpha>0$ such that the eigenvalues $\lambda_i({\mathbf x})$ of $K(x)$ satisfy $\lambda_i({\mathbf x})\ge \alpha$. The use of the symbol $p$ instead of the usual $u$ for the solution variable will become clear in the next section.


    After discussing the equation and the formulation we are going to use to solve it, this introduction will cover the use of block matrices and vectors, the definition of solvers and preconditioners, and finally the actual test case we are going to solve.

    We are going to extend this tutorial program in step-21 to solve not only the mixed Laplace equation, but add another equation that describes the transport of a mixture of two fluids.

    The equations covered here fall into the class of vector-valued problems. A toplevel overview of this topic can be found in the Handling vector valued problems topic.

    The equations

    In the form above, the Poisson equation (i.e., the Laplace equation with a nonzero right hand side) is generally considered a good model equation for fluid flow in porous media. Of course, one typically models fluid flow through the Navier-Stokes equations or, if fluid velocities are slow or the viscosity is large, the Stokes equations (which we cover in step-22). In the first of these two models, the forces that act are inertia and viscous friction, whereas in the second it is only viscous friction – i.e., forces that one fluid particle exerts on a nearby one. This is appropriate if you have free flow in a large domain, say a pipe, a river, or in the air. On the other hand, if the fluid is confined in pores, then friction forces exerted by the pore walls on the fluid become more and more important and internal viscous friction becomes less and less important. Modeling this then first leads to the Brinkman model if both effects are important, and in the limit of very small pores to the Darcy equations. The latter is just a different name for the Poisson or Laplace equation, connotating it with the area to which one wants to apply it: slow flow in a porous medium. In essence it says that the velocity is proportional to the negative pressure gradient that drives the fluid through the porous medium.


The Darcy equation models this pressure that drives the flow. (Because the solution variable is a pressure, we here use the name $p$ instead of the name $u$ more commonly used for the solution of partial differential equations.) Typical applications of this view of the Laplace equation are then modeling groundwater flow, or the flow of hydrocarbons in oil reservoirs. In these applications, $K$ is the permeability tensor, i.e., a measure for how much resistance the soil or rock matrix exerts on the fluid flow.


    In the applications named above, a desirable feature for a numerical scheme is that it should be locally conservative, i.e., that whatever flows into a cell also flows out of it (or the difference is equal to the integral over the source terms over each cell, if the sources are nonzero). However, as it turns out, the usual discretizations of the Laplace equation (such as those used in step-3, step-4, or step-6) do not satisfy this property. But, one can achieve this by choosing a different formulation of the problem and a particular combination of finite element spaces.

    Formulation, weak form, and discrete problem

    To this end, one first introduces a second variable, called the velocity, ${\mathbf u}=-K\nabla p$. By its definition, the velocity is a vector in the negative direction of the pressure gradient, multiplied by the permeability tensor. If the permeability tensor is proportional to the unit matrix, this equation is easy to understand and intuitive: the higher the permeability, the higher the velocity; and the velocity is proportional to the gradient of the pressure, going from areas of high pressure to areas of low pressure (thus the negative sign).

@@ -202,15 +202,15 @@

    Here, ${\mathbf n}$ is the outward normal vector at the boundary. Note how in this formulation, Dirichlet boundary values of the original problem are incorporated in the weak form.


To be well-posed, we have to look for solutions and test functions in the space $H({\textrm{div}})=\{{\mathbf w}\in L^2(\Omega)^d:\ {\textrm{div}}\ {\mathbf w}\in L^2\}$ for $\mathbf u$, $\mathbf v$, and $L^2$ for $p,q$. It is a well-known fact stated in almost every book on finite element theory that if one chooses discrete finite element spaces for the approximation of ${\mathbf u},p$ inappropriately, then the resulting discrete problem is unstable and the discrete solution will not converge to the exact solution. (Some details on the problem considered here – which falls in the class of "saddle-point problems" – can be found on the Wikipedia page on the Ladyzhenskaya-Babuska-Brezzi (LBB) condition.)

    -

    To overcome this, a number of different finite element pairs for ${\mathbf u},p$ have been developed that lead to a stable discrete problem. One such pair is to use the Raviart-Thomas spaces $RT(k)$ for the velocity ${\mathbf u}$ and discontinuous elements of class $DQ(k)$ for the pressure $p$. For details about these spaces, we refer in particular to the book on mixed finite element methods by Brezzi and Fortin, but many other books on the theory of finite elements, for example the classic book by Brenner and Scott, also state the relevant results. In any case, with appropriate choices of function spaces, the discrete formulation reads as follows: Find ${\mathbf
+<p>To be well-posed, we have to look for solutions and test functions in the space <picture><source srcset=$H({\textrm{div}})=\{{\mathbf w}\in L^2(\Omega)^d:\ {\textrm{div}}\ {\mathbf w}\in L^2\}$ for $\mathbf u$, $\mathbf v$, and $L^2$ for $p,q$. It is a well-known fact stated in almost every book on finite element theory that if one chooses discrete finite element spaces for the approximation of ${\mathbf u},p$ inappropriately, then the resulting discrete problem is instable and the discrete solution will not converge to the exact solution. (Some details on the problem considered here – which falls in the class of "saddle-point problems" – can be found on the Wikipedia page on the Ladyzhenskaya-Babuska-Brezzi (LBB) condition.)

    +

    To overcome this, a number of different finite element pairs for ${\mathbf u},p$ have been developed that lead to a stable discrete problem. One such pair is to use the Raviart-Thomas spaces $RT(k)$ for the velocity ${\mathbf u}$ and discontinuous elements of class $DQ(k)$ for the pressure $p$. For details about these spaces, we refer in particular to the book on mixed finite element methods by Brezzi and Fortin, but many other books on the theory of finite elements, for example the classic book by Brenner and Scott, also state the relevant results. In any case, with appropriate choices of function spaces, the discrete formulation reads as follows: Find ${\mathbf
 u}_h,p_h$ so that

    \begin{eqnarray*}
   A(\{{\mathbf u}_h,p_h\},\{{\mathbf v}_h,q_h\}) = F(\{{\mathbf v}_h,q_h\})
   \qquad\qquad \forall {\mathbf v}_h,q_h.
 \end{eqnarray*}

Before continuing, let us briefly pause and show that the choice of function spaces above provides us with the desired local conservation property. In particular, because the pressure space consists of discontinuous piecewise polynomials, we can choose the test function $q$ as the function that is equal to one on any given cell $K$ and zero everywhere else. If we also choose ${\mathbf v}=0$ everywhere (remember that the weak form above has to hold for all discrete test functions $q,v$), then putting these choices of test functions into the weak formulation above implies in particular that

\begin{eqnarray*}
  - (1,{\textrm{div}}\ {\mathbf u}_h)_K
  =
  -(1,f)_K,
\end{eqnarray*}

which, by the divergence theorem, we can also write as

\begin{eqnarray*}
  -\int_{\partial K} {\mathbf u}_h \cdot {\mathbf n}
  =
  -\int_K f.
\end{eqnarray*}

If you now recall that ${\mathbf u}$ was the velocity, then the integral on the left is exactly the (discrete) flux across the boundary of the cell $K$. The statement is then that the flux must be equal to the integral over the sources within $K$. In particular, if there are no sources (i.e., $f=0$ in $K$), then the statement is that the total flux is zero, i.e., whatever flows into a cell must flow out of it through some other part of the cell boundary. This is what we call local conservation because it holds for every cell.

On the other hand, the usual continuous $Q_k$ elements would not result in this kind of property when used for the pressure (as, for example, we do in step-43) because one can not choose a discrete test function $q_h$ that is one on a cell $K$ and zero everywhere else: It would be discontinuous and consequently not in the finite element space. (Strictly speaking, all we can say is that the proof above would not work for continuous elements. Whether these elements might still result in local conservation is a different question as one could think that a different kind of proof might still work; in reality, however, the property really does not hold.)

    Assembling the linear system

    The deal.II library (of course) implements Raviart-Thomas elements $RT(k)$ of arbitrary order $k$, as well as discontinuous elements $DG(k)$. If we forget about their particular properties for a second, we then have to solve a discrete problem

\begin{eqnarray*}
  A(x_h,w_h) = F(w_h)
\end{eqnarray*}

with the bilinear form and right hand side as stated above, and $x_h=\{{\mathbf u}_h,p_h\}$, $w_h=\{{\mathbf v}_h,q_h\}$. Both $x_h$ and $w_h$ are from the space $X_h=RT(k)\times DQ(k)$, where $RT(k)$ is itself a space of $dim$-dimensional functions to accommodate the fact that the flow velocity is vector-valued. The necessary question then is: how do we do this in a program?
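As an aside, such a product space is what deal.II's FESystem class is made for; a minimal sketch of how the combined velocity-pressure element could be built (with degree standing for the polynomial degree $k$, a hypothetical value here) might look as follows:

const unsigned int degree = 0; // hypothetical choice of k
FESystem<dim> fe(FE_RaviartThomas<dim>(degree), 1, // dim-dimensional velocity space
                 FE_DGQ<dim>(degree), 1);          // scalar pressure space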

Vector-valued elements have already been discussed in previous tutorial programs, the first time and in detail in step-8. The main difference there was that the vector-valued space $V_h$ is uniform in all its components: the $dim$ components of the displacement vector are all equal and from the same function space. What we could therefore do was to build $V_h$ as the outer product of the $dim$ times the usual $Q(1)$ finite element space, and by this make sure that all our shape functions have only a single non-zero vector component. Instead of dealing with vector-valued shape functions, all we did in step-8 was therefore to look at the (scalar) only non-zero component and use the fe.system_to_component_index(i).first call to figure out which component this actually is.

This doesn't work with Raviart-Thomas elements: following from their construction to satisfy certain regularity properties of the space $H({\textrm{div}})$, the shape functions of $RT(k)$ are usually nonzero in all their vector components at once. For this reason, were fe.system_to_component_index(i).first applied to determine the only nonzero component of shape function $i$, an exception would be generated. What we really need to do is to get at all vector components of a shape function. In deal.II terminology, we call such finite elements non-primitive, whereas finite elements that are either scalar or for which every vector-valued shape function is nonzero only in a single vector component are called primitive.

    So what do we have to do for non-primitive elements? To figure this out, let us go back in the tutorial programs, almost to the very beginnings. There, we learned that we use the FEValues class to determine the values and gradients of shape functions at quadrature points. For example, we would call fe_values.shape_value(i,q_point) to obtain the value of the ith shape function on the quadrature point with number q_point. Later, in step-8 and other tutorial programs, we learned that this function call also works for vector-valued shape functions (of primitive finite elements), and that it returned the value of the only non-zero component of shape function i at quadrature point q_point.

    For non-primitive shape functions, this is clearly not going to work: there is no single non-zero vector component of shape function i, and the call to fe_values.shape_value(i,q_point) would consequently not make much sense. However, deal.II offers a second function call, fe_values.shape_value_component(i,q_point,comp) that returns the value of the compth vector component of shape function i at quadrature point q_point, where comp is an index between zero and the number of vector components of the present finite element; for example, the element we will use to describe velocities and pressures is going to have $dim+1$ components. It is worth noting that this function call can also be used for primitive shape functions: it will simply return zero for all components except one; for non-primitive shape functions, it will in general return a non-zero value for more than just one component.
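As a small illustration (not taken from the program itself), the values of all components of shape function i at a quadrature point could be collected like this, assuming fe and fe_values objects as in the previous tutorial programs:

// Collect the values of every vector component of shape function i at
// quadrature point q_point. For a primitive element all but one entry
// would be zero; for a Raviart-Thomas element several can be nonzero.
std::vector<double> component_values(fe.n_components());
for (unsigned int comp = 0; comp < fe.n_components(); ++comp)
  component_values[comp] = fe_values.shape_value_component(i, q_point, comp);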

    We could now attempt to rewrite the bilinear form above in terms of vector components. For example, in 2d, the first term could be rewritten like this (note that $u_0=x_0, u_1=x_1, p=x_2$):

local_matrix(i,j) += (fe_values.shape_value_component(i,q,0) *
                      fe_values.shape_value_component(j,q,0)
                      +
                      fe_values.shape_value_component(i,q,1) *
                      fe_values.shape_value_component(j,q,1)
                     ) *
                     fe_values.JxW(q);
This is, at best, tedious, error prone, and not dimension independent. There are obvious ways to make things dimension independent, but in the end, the code is simply not pretty. What would be much nicer is if we could simply extract the ${\mathbf u}$ and $p$ components of a shape function $x_h^i$. In the program we do that in the following way:

    const FEValuesExtractors::Vector velocities (0);
    const FEValuesExtractors::Scalar pressure (dim);
With these extractors, the entire local contribution of the bilinear form can then be written in a way that mirrors the mathematical notation:

for (unsigned int q = 0; q < n_q_points; ++q)
  for (unsigned int i = 0; i < dofs_per_cell; ++i)
    for (unsigned int j = 0; j < dofs_per_cell; ++j)
      local_matrix(i, j) += (fe_values[velocities].value(i, q) *
                               k_inverse_values[q] *
                               fe_values[velocities].value(j, q)
                             -
                             fe_values[velocities].divergence(i, q) *
                             fe_values[pressure].value(j, q)
                             -
                             fe_values[pressure].value(i, q) *
                             fe_values[velocities].divergence(j, q)) *
                            fe_values.JxW(q);

    You will find the exact same code as above in the sources for the present program. We will therefore not comment much on it below.

    Linear solvers and preconditioners

    After assembling the linear system we are faced with the task of solving it. The problem here is that the matrix possesses two undesirable properties:

• It is indefinite, i.e., it has both positive and negative eigenvalues. We don't want to prove this property here, but note that this is true for all matrices of the form $\left(\begin{array}{cc} M & B \\ B^T & 0 \end{array}\right)$ such as the one here where $M$ is positive definite.
• The matrix has a zero block at the bottom right (there is no term in the bilinear form that couples the pressure $p$ with the pressure test function $q$).

    At least it is symmetric, but the first issue above still means that the Conjugate Gradient method is not going to work since it is only applicable to problems in which the matrix is symmetric and positive definite. We would have to resort to other iterative solvers instead, such as MinRes, SymmLQ, or GMRES, that can deal with indefinite systems. However, then the next problem immediately surfaces: Due to the zero block, there are zeros on the diagonal and none of the usual, "simple" preconditioners (Jacobi, SSOR) will work as they require division by diagonal elements.
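We do not pursue this route in the program, but as a sketch, a MinRes solve could be set up along the following lines (names are hypothetical; PreconditionIdentity is used precisely because, as just discussed, the usual diagonal-based preconditioners are not applicable here):

SolverControl solver_control(1000, 1e-12); // max iterations, absolute tolerance
SolverMinRes<Vector<double>> solver(solver_control);
solver.solve(system_matrix, solution, system_rhs, PreconditionIdentity());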

    For the matrix sizes we expect to run with this program, the by far simplest approach would be to just use a direct solver (in particular, the SparseDirectUMFPACK class that is bundled with deal.II). step-29 goes this route and shows that solving any linear system can be done in just 3 or 4 lines of code.
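For reference, that direct-solver route is indeed only a sketch of three lines, assuming the usual system_matrix, solution, and system_rhs objects:

SparseDirectUMFPACK direct_solver;
direct_solver.initialize(system_matrix);   // compute a sparse LU decomposition
direct_solver.vmult(solution, system_rhs); // apply the inverse to the right hand side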

To understand iterative solvers for this problem, however, let us write the linear system in the following block form:

\begin{eqnarray*}
\left(\begin{array}{cc}
  M & B \\ B^T & 0
\end{array}\right)
\left(\begin{array}{c}
  U \\ P
\end{array}\right)
=
\left(\begin{array}{c}
  F \\ G
\end{array}\right),
\end{eqnarray*}

where $U,P$ are the values of velocity and pressure degrees of freedom, respectively, $M$ is the mass matrix on the velocity space, $B^T$ corresponds to the negative divergence operator, and $B$ is its transpose and corresponds to the gradient.

    By block elimination, we can then re-order this system in the following way (multiply the first row of the system by $B^TM^{-1}$ and then subtract the second row from it):

    \begin{eqnarray*}
   B^TM^{-1}B P &=& B^TM^{-1} F - G, \\
   MU &=& F - BP.
 \end{eqnarray*}

Here, the matrix $S=B^TM^{-1}B$ (called the Schur complement of $A$) is obviously symmetric and, owing to the positive definiteness of $M$ and the fact that $B$ has full column rank, $S$ is also positive definite.

Consequently, if we could compute $S$, we could apply the Conjugate Gradient method to it. However, computing $S$ is expensive because it requires us to compute the inverse of the (possibly large) matrix $M$; and $S$ is in fact also a full matrix because even though $M$ is sparse, its inverse $M^{-1}$ will generally be a dense matrix. On the other hand, the CG algorithm doesn't require us to actually have a representation of $S$: It is sufficient to form matrix-vector products with it. We can do so in steps, using the fact that matrix products are associative (i.e., we can set parentheses in such a way that the product is more convenient to compute): To compute $Sv=(B^TM^{-1}B)v=B^T(M^{-1}(Bv))$, we

1. compute $w = B v$;
2. solve $My = w$ for $y=M^{-1}w$, using the CG method applied to the positive definite and symmetric mass matrix $M$;
3. compute $z=B^Ty$ to obtain $z=Sv$.

      Note how we evaluate the expression $B^TM^{-1}Bv$ right to left to avoid matrix-matrix products; this way, all we have to do is evaluate matrix-vector products.

      -

      In the following, we will then have to come up with ways to represent the matrix $S$ so that it can be used in a Conjugate Gradient solver, as well as to define ways in which we can precondition the solution of the linear system involving $S$, and deal with solving linear systems with the matrix $M$ (the second step above).

      +

      In the following, we will then have to come up with ways to represent the matrix $S$ so that it can be used in a Conjugate Gradient solver, as well as to define ways in which we can precondition the solution of the linear system involving $S$, and deal with solving linear systems with the matrix $M$ (the second step above).

      Note
      The key point in this consideration is to recognize that to implement an iterative solver such as CG or GMRES, we never actually need the actual elements of a matrix! All that is required is that we can form matrix-vector products. The same is true for preconditioners. In deal.II we encode this requirement by only requiring that matrices and preconditioners given to solver classes have a vmult() member function that does the matrix-vector product. How a class chooses to implement this function is not important to the solver. Consequently, classes can implement it by, for example, doing a sequence of products and linear solves as discussed above.
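To illustrate the point, here is a minimal sketch (not part of the tutorial itself) of an operator that a deal.II solver would happily accept even though no matrix entries are stored anywhere:

// Acts like s times the identity matrix, but never stores a matrix.
class ScaledIdentity
{
public:
  explicit ScaledIdentity(const double s)
    : s(s)
  {}

  void vmult(Vector<double> &dst, const Vector<double> &src) const
  {
    dst = src;
    dst *= s; // dst = s * src
  }

private:
  const double s;
};

Any of the iterative solver classes could be handed such an object in place of an assembled SparseMatrix.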

      The LinearOperator framework in deal.II

      deal.II includes support for describing such linear operations in a very general way. This is done with the LinearOperator class that, like the MatrixType concept, defines a minimal interface for applying a linear operation to a vector:

      std::function<void(Range &, const Domain &)> vmult;
Thanks to this generality, we can wrap the mass matrix $M$ and its inverse into LinearOperator objects:

const auto op_M = linear_operator(M);

ReductionControl reduction_control_M(2000, 1.0e-18, 1.0e-10);
SolverCG<Vector<double>> solver_M(reduction_control_M);
PreconditionJacobi<SparseMatrix<double>> preconditioner_M;
preconditioner_M.initialize(M);

const auto op_M_inv = inverse_operator(op_M, solver_M, preconditioner_M);

Rather than using a SolverControl we use the ReductionControl class here that stops iterations when either an absolute tolerance is reached (for which we choose $10^{-18}$) or when the residual is reduced by a certain factor (here, $10^{-10}$). In contrast, the SolverControl class only checks for absolute tolerances. We have to use ReductionControl in our case to work around a minor issue: The right hand sides that we will feed to op_M_inv are essentially formed by residuals that naturally decrease vastly in norm as the outer iterations progress. This makes control by an absolute tolerance very error prone.

We now have a LinearOperator op_M_inv that we can use to construct more complicated operators such as the Schur complement $S$. Assuming that B is a reference to the upper right block, constructing a LinearOperator op_S is a matter of two lines:

const auto op_B = linear_operator(B);
const auto op_S = transpose_operator(op_B) * op_M_inv * op_B;

Here, the multiplication of three LinearOperator objects yields a composite object op_S whose vmult() function first applies $B$, then $M^{-1}$ (i.e. solving an equation with $M$), and finally $B^T$ to any given input vector. In that sense op_S.vmult() is similar to the following code:

      B.vmult (tmp1, src); // multiply with the top right block: B
solver_M.solve(M, tmp2, tmp1, preconditioner_M); // multiply with M^-1, i.e. solve M tmp2 = tmp1
      B.Tvmult (dst, tmp2); // multiply with the bottom left block: B^T

(tmp1 and tmp2 are two temporary vectors). The key point behind this approach is the fact that we never actually form a product of matrices. Instead, whenever we have to perform a matrix-vector multiplication with op_S we simply run all individual vmult operations in the above sequence.

Alternatively, one could implement the same operation by hand, with a class that provides a vmult() function of its own:

class SchurComplement
{
public:
  void vmult(Vector<double> &dst, const Vector<double> &src) const
  {
    B.vmult(tmp1, src);
    solver_M.solve(M, tmp2, tmp1, preconditioner_M);
    B.Tvmult(dst, tmp2);
  }

  // (references to M, B, solver_M, preconditioner_M and the temporary
  // vectors tmp1, tmp2 omitted)
};
Even though both approaches are exactly equivalent, the LinearOperator class has a big advantage over this manual approach. It provides so-called syntactic sugar: Mathematically, we think about $S$ as being the composite matrix $S=B^TM^{-1}B$ and the LinearOperator class allows you to write this out more or less verbatim,
      const auto op_M_inv = inverse_operator(op_M, solver_M, preconditioner_M);
      const auto op_S = transpose_operator(op_B) * op_M_inv * op_B;
      The manual approach on the other hand obscures this fact.
All that is left for us to do now is to form the right hand sides of the two equations defining $P$ and $U$, and then solve them with the Schur complement matrix and the mass matrix, respectively. For example the right hand side of the first equation reads $B^TM^{-1}F-G$. This could be implemented as follows:

    Vector<double> schur_rhs (P.size());
    Vector<double> tmp (U.size());
    op_M_inv.vmult (tmp, F);
    transpose_operator(op_B).vmult (schur_rhs, tmp);
schur_rhs -= G;

Being forced to use several statements and a temporary vector tmp for what is mathematically just the expression $B^TM^{-1}F-G$ is awkward. For expressions of this kind, deal.II offers the PackagedOperation class, whose interface is built around two std::function objects:

std::function<void(Range &)> apply;
    std::function<void(Range &)> apply_add;

    The class allows lazy evaluation of expressions involving vectors and linear operators. This is done by storing the computational expression and only performing the computation when either the object is converted to a vector object, or PackagedOperation::apply() (or PackagedOperation::apply_add()) is invoked by hand. Assuming that F and G are the two vectors of the right hand side we can simply write:

    const auto schur_rhs = transpose_operator(op_B) * op_M_inv * F - G;

    Here, schur_rhs is a PackagedOperation that records the computation we specified. It does not create a vector with the actual result immediately.
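In a sketch, the recorded computation is only carried out at the point where the PackagedOperation is assigned to an actual vector:

// Nothing has been computed when schur_rhs above is declared; the expression
// B^T M^{-1} F - G is evaluated only by the following conversion:
Vector<double> schur_rhs_vector = schur_rhs;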

With these prerequisites at hand, solving for $P$ and $U$ is a matter of creating another solver and inverse:

    SolverControl solver_control_S(2000, 1.e-12);
    SolverCG<Vector<double>> solver_S(solver_control_S);
    PreconditionIdentity preconditioner_S;
const auto op_S_inv = inverse_operator(op_S, solver_S, preconditioner_S);

P = op_S_inv * schur_rhs;
U = op_M_inv * (F - op_B * P);
    Note
    The functionality that we developed in this example step by hand is already readily available in the library. Have a look at schur_complement(), condense_schur_rhs(), and postprocess_schur_solution().

    A preconditioner for the Schur complement

One may ask whether it would help if we had a preconditioner for the Schur complement $S=B^TM^{-1}B$. The general answer, as usual, is: of course. The problem is only that we don't know anything about this Schur complement matrix. We do not know its entries, all we know is its action. On the other hand, we have to realize that our solver is expensive since in each iteration we have to do one matrix-vector product with the Schur complement, which means that we have to invert the mass matrix once in each iteration.

There are different approaches to preconditioning such a matrix. On the one extreme is to use something that is cheap to apply and therefore has no real impact on the work done in each iteration. The other extreme is a preconditioner that is itself very expensive, but in return really brings down the number of iterations required to solve with $S$.

    We will try something along the second approach, as much to improve the performance of the program as to demonstrate some techniques. To this end, let us recall that the ideal preconditioner is, of course, $S^{-1}$, but that is unattainable. However, how about

    \begin{eqnarray*}
   \tilde S^{-1} = [B^T ({\textrm{diag}\ }M)^{-1}B]^{-1}
 \end{eqnarray*}

as a preconditioner? That would mean that every time we have to do one preconditioning step, we actually have to solve with $\tilde S$. At first, this looks almost as expensive as solving with $S$ right away. However, note that in the inner iteration, we do not have to calculate $M^{-1}$, but only the inverse of its diagonal, which is cheap.

Thankfully, the LinearOperator framework makes this very easy to write out. We already used a Jacobi preconditioner (preconditioner_M) for the $M$ matrix earlier. So all that is left to do is to write out what the approximate Schur complement should look like:

    const auto op_aS =
    transpose_operator(op_B) * linear_operator(preconditioner_M) * op_B;
Note how this operator differs in simply doing one Jacobi sweep (i.e. multiplying with the inverses of the diagonal) instead of multiplying with the full $M^{-1}$. (This is how a single Jacobi preconditioner step with $M$ is defined: it is the multiplication with the inverse of the diagonal of $M$; in other words, the operation $({\textrm{diag}\ }M)^{-1}x$ on a vector $x$ is exactly what PreconditionJacobi does.)
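Spelled out by hand, one such Jacobi sweep with $M$ amounts to nothing more than the following sketch (x and y standing for input and output vectors, and ignoring a possible relaxation factor):

for (unsigned int i = 0; i < x.size(); ++i)
  y(i) = x(i) / M.diag_element(i); // y = (diag M)^{-1} x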

With all this we almost have the preconditioner completed: it should be the inverse of the approximate Schur complement. We implement this again by creating a linear operator with the inverse_operator() function. This time, however, we would like to choose a relatively modest tolerance for the CG solver (that inverts op_aS). The reasoning is that op_aS is only a coarse approximation to op_S, so we actually do not need to invert it exactly. This, however, creates a subtle problem: preconditioner_S will be used in the final outer CG iteration to create an orthogonal basis. But for this to work, it must be precisely the same linear operation for every invocation. We ensure this by using an IterationNumberControl that allows us to fix the number of CG iterations that are performed to a fixed small number (in our case 30):

    IterationNumberControl iteration_number_control_aS(30, 1.e-18);
    SolverCG<Vector<double>> solver_aS(iteration_number_control_aS);
    PreconditionIdentity preconditioner_aS;
const auto preconditioner_S =
  inverse_operator(op_aS, solver_aS, preconditioner_aS);

    The next thing is that we want to figure out the sizes of these blocks so that we can allocate an appropriate amount of space. To this end, we call the DoFTools::count_dofs_per_fe_component() function that counts how many shape functions are non-zero for a particular vector component. We have dim+1 vector components, and DoFTools::count_dofs_per_fe_component() will count how many shape functions belong to each of these components.

There is one problem here. As described in the documentation of that function, it wants to put the number of $x$-velocity shape functions into dofs_per_component[0], the number of $y$-velocity shape functions into dofs_per_component[1] (and similar in 3d), and the number of pressure shape functions into dofs_per_component[dim]. But, the Raviart-Thomas element is special in that it is non-primitive, i.e., for Raviart-Thomas elements all velocity shape functions are nonzero in all components. In other words, the function cannot distinguish between $x$ and $y$ velocity functions because there is no such distinction. It therefore puts the overall number of velocity shape functions into each of dofs_per_component[c], $0\le c<\text{dim}$. On the other hand, the number of pressure variables equals the number of shape functions that are nonzero in the dim-th component.

    Using this knowledge, we can get the number of velocity shape functions from any of the first dim elements of dofs_per_component, and then use this below to initialize the vector and matrix block sizes, as well as create output.
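In other words, once dofs_per_component has been filled, the block sizes follow as in this short sketch:

// Velocities: any of the first dim entries carries the same number;
// pressures: the entry with index dim.
const unsigned int n_u = dofs_per_component[0];
const unsigned int n_p = dofs_per_component[dim];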

    Note
    If you find this concept difficult to understand, you may want to consider using the function DoFTools::count_dofs_per_fe_block() instead, as we do in the corresponding piece of code in step-22. You might also want to read up on the difference between blocks and components in the glossary.
  const std::vector<types::global_dof_index> dofs_per_component =
    DoFTools::count_dofs_per_fe_component(dof_handler);

    Results

    Output of the program and graphical visualization

If we run the program as is, we get this output for the $32\times 32$ mesh we use (for a total of 1024 cells with 1024 pressure degrees of freedom since we use piecewise constants, and 2112 velocities because the Raviart-Thomas element defines one degree of freedom per face and there are $1024 + 32 = 1056$ faces parallel to the $x$-axis and the same number parallel to the $y$-axis):

$ make run
     [ 66%] Built target step-20
     Scanning dependencies of target run
     [100%] Run step-20 with Release configuration

As an additional remark, note how the x-velocity in the left image is only continuous in the x-direction, whereas the y-velocity is continuous in the y-direction. The flow fields are discontinuous in the other directions. This very obviously reflects the continuity properties of the Raviart-Thomas elements, which are, in fact, only in the space H(div) and not in the space $H^1$. Finally, the pressure field is completely discontinuous, but that should not be surprising given that we have chosen FE_DGQ(0) as the finite element for that solution component.

    Convergence

    The program offers two obvious places where playing and observing convergence is in order: the degree of the finite elements used (passed to the constructor of the MixedLaplaceProblem class from main()), and the refinement level (determined in MixedLaplaceProblem::make_grid_and_dofs). What one can do is to change these values and observe the errors computed later on in the course of the program run.

If one does this, one finds the following pattern for the $L_2$ error in the pressure variable:

(Convergence table: for increasing finite element order, the $L_2$ pressure error on successively refined meshes is observed to converge as $O(h)$, $O(h^2)$, and $O(h^3)$, respectively, as recorded in the last row of the table.)

    The theoretically expected convergence orders are very nicely reflected by the experimentally observed ones indicated in the last row of the table.

One can make the same experiment with the $L_2$ error in the velocity variables:

The step-21 tutorial program

Introduction

The equations covered here are an extension of the material already covered in step-20. In particular, they fall into the class of vector-valued problems. A top-level overview of this topic can be found in the Handling vector valued problems topic.

    The two phase flow problem

    Modeling of two phase flow in porous media is important for both environmental remediation and the management of petroleum and groundwater reservoirs. Practical situations involving two phase flow include the dispersal of a nonaqueous phase liquid in an aquifer, or the joint movement of a mixture of fluids such as oil and water in a reservoir. Simulation models, if they are to provide realistic predictions, must accurately account for these effects.

To derive the governing equations, consider two phase flow in a reservoir $\Omega$ under the assumption that the movement of fluids is dominated by viscous effects; i.e. we neglect the effects of gravity, compressibility, and capillary pressure. Porosity will be considered to be constant. We will denote variables referring to either of the two phases using subscripts $w$ and $o$, short for water and oil. The derivation of the equations holds for other pairs of fluids as well, however.

    The velocity with which molecules of each of the two phases move is determined by Darcy's law that states that the velocity is proportional to the pressure gradient:

\begin{eqnarray*}
  \mathbf{u}_{j}
  =
  -\frac{k_{rj}(S)}{\mu_{j}} \mathbf{K} \cdot \nabla p
\end{eqnarray*}

where $\mathbf{u}_{j}$ is the velocity of phase $j=o,w$, $K$ is the permeability tensor, $k_{rj}$ is the relative permeability of phase $j$, $p$ is the pressure and $\mu_{j}$ is the viscosity of phase $j$. Finally, $S$ is the saturation (volume fraction), i.e. a function with values between 0 and 1 indicating the composition of the mixture of fluids. In general, the coefficients $K, k_{rj}, \mu$ may be spatially dependent variables, and we will always treat them as non-constant functions in the following.

    We combine Darcy's law with the statement of conservation of mass for each phase,

\[
  \textrm{div}\ \mathbf{u}_{j} = q_j,
\]

where $q_j$ is a source term for phase $j$. Inserting Darcy's law and summing over the two phases then yields an equation for the pressure:

\begin{eqnarray*}
- \nabla \cdot (\mathbf{K}\lambda(S) \nabla p)= q.
\end{eqnarray*}

Here, $q$ is the sum source term, and

    \[
   \lambda(S) = \frac{k_{rw}(S)}{\mu_{w}}+\frac{k_{ro}(S)}{\mu_{o}}
\]

Note that the advection equation contains the term $\mathbf{u} \cdot \nabla F(S)$ rather than $\mathbf{u} \cdot \nabla S$ to indicate that the saturation is not simply transported along; rather, since the two phases move with different velocities, the saturation can actually change even in the advected coordinate system. To see this, rewrite $\mathbf{u} \cdot \nabla F(S) = \mathbf{u} F'(S) \cdot \nabla S$ to observe that the actual velocity with which the phase with saturation $S$ is transported is $\mathbf u F'(S)$ whereas the other phase is transported at velocity $\mathbf u (1-F'(S))$. $F(S)$ is consequently often referred to as the fractional flow.

    In summary, what we get are the following two equations:

\begin{eqnarray*}
  - \nabla \cdot (\mathbf{K}\lambda(S) \nabla p) &=& q
  \qquad \textrm{in}\ \Omega\times[0,T],
  \\
  S_{t} + \mathbf{u} \cdot \nabla F(S) &=& 0
  \qquad \textrm{in}\ \Omega\times[0,T].
\end{eqnarray*}

Here, $p=p(\mathbf x, t), S=S(\mathbf x, t)$ are now time dependent functions: while at every time instant the flow field is in equilibrium with the pressure (i.e. we neglect dynamic accelerations), the saturation is transported along with the flow and therefore changes over time, in turn affecting the flow field again through the dependence of the first equation on $S$.

    This set of equations has a peculiar character: one of the two equations has a time derivative, the other one doesn't. This corresponds to the character that the pressure and velocities are coupled through an instantaneous constraint, whereas the saturation evolves over finite time scales.

    Such systems of equations are called Differential Algebraic Equations (DAEs), since one of the equations is a differential equation, the other is not (at least not with respect to the time variable) and is therefore an "algebraic" equation. (The notation comes from the field of ordinary differential equations, where everything that does not have derivatives with respect to the time variable is necessarily an algebraic equation.) This class of equations contains pretty well-known cases: for example, the time dependent Stokes and Navier-Stokes equations (where the algebraic constraint is that the divergence of the flow field, $\textrm{div}\ \mathbf u$, must be zero) as well as the time dependent Maxwell equations (here, the algebraic constraint is that the divergence of the electric displacement field equals the charge density, $\textrm{div}\ \mathbf D = \rho$ and that the divergence of the magnetic flux density is zero: $\textrm{div}\ \mathbf
 B = 0$); even the quasistatic model of step-18 falls into this category. We will see that the different character of the two equations will inform our discretization strategy for the two equations.

Time discretization

We use a simple splitting in time: in each time step, we first solve the pressure-velocity system implicitly, using the saturation $S^n$ from the previous time step, and then advance the saturation explicitly:

\begin{eqnarray*}
  - \nabla \cdot (\mathbf{K}\lambda(S^n) \nabla p^{n+1}) &=& q^{n+1},
  \\
  S^{n+1} &=& S^n - \triangle t\; \mathbf{u}^{n+1} \cdot \nabla F(S^n),
\end{eqnarray*}

    where $\triangle t$ is the length of a time step. Note how we solve the implicit pressure-velocity system that only depends on the previously computed saturation $S^n$, and then do an explicit time step for $S^{n+1}$ that only depends on the previously known $S^n$ and the just computed $\mathbf{u}^{n+1}$. This way, we never have to iterate for the nonlinearities of the system as we would have if we used a fully implicit method. (In a more modern perspective, this should be seen as an "operator splitting" method. step-58 has a long description of the idea behind this.)

We can then state the problem in weak form as follows, by multiplying each equation with test functions $\mathbf v$, $\phi$, and $\sigma$ and integrating terms by parts:

\begin{eqnarray*}
  \left((\mathbf{K}\lambda(S^n))^{-1} \mathbf{u}^{n+1},\mathbf v\right)_\Omega -
  (p^{n+1}, \nabla\cdot\mathbf v)_\Omega &=&
  -(p^{n+1}, \mathbf n \cdot \mathbf v)_{\partial\Omega},
  \\
  (\nabla \cdot\mathbf{u}^{n+1}, \phi)_\Omega &=& (q^{n+1},\phi)_\Omega
\end{eqnarray*}

Note that in the first term, we have to prescribe the pressure $p^{n+1}$ on the boundary $\partial\Omega$ as boundary values for our problem. $\mathbf n$ denotes the unit outward normal vector to $\partial K$, as usual.

    For the saturation equation, we obtain after integrating by parts

\begin{eqnarray*}
  (S^{n+1}, \sigma)_\Omega
  -
  \triangle t
  \sum_K
  \left(F(S^n) \mathbf{u}^{n+1}, \nabla \sigma\right)_K
  +
  \triangle t
  \sum_K
  \left(F(S^n) (\mathbf n \cdot \mathbf{u}^{n+1}), \sigma\right)_{\partial K}
  =
  (S^n,\sigma)_\Omega
  +
  \triangle t
  \sum_K
  \left(F(S^n) q^{n+1}, \sigma\right)_K.
\end{eqnarray*}
We introduce an object of type DiscreteTime in order to keep track of the current value of time and time step in the code. This class encapsulates many complexities regarding adjusting time step size and stopping at a specified final time.
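A minimal sketch of the DiscreteTime interface as it could be used in a time loop (the start and end times and the compute_time_step() helper are hypothetical):

DiscreteTime time(/*start_time=*/0., /*end_time=*/1.);
while (time.is_at_end() == false)
  {
    time.set_desired_next_step_size(compute_time_step()); // must be nonzero
    // ... solve for pressure and velocity, then update the saturation ...
    time.advance_time();
  }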

    Space discretization

In each time step, we then apply the mixed finite method of step-20 to the velocity and pressure. To be well-posed, we choose Raviart-Thomas spaces $RT_{k}$ for $\mathbf{u}$ and discontinuous elements of class $DGQ_{k}$ for $p$. For the saturation, we will also choose $DGQ_{k}$ spaces.

    Since we have discontinuous spaces, we have to think about how to evaluate terms on the interfaces between cells, since discontinuous functions are not really defined there. In particular, we have to give a meaning to the last term on the left hand side of the saturation equation. To this end, let us define that we want to evaluate it in the following sense:

\begin{eqnarray*}
  &&\left(F(S^n) (\mathbf n \cdot \mathbf{u}^{n+1}), \sigma\right)_{\partial K}
  \\
  &&\qquad \dealcoloneq
  \left(F(S^n_+) (\mathbf n \cdot \mathbf{u}^{n+1}_+), \sigma\right)_{\partial K_+}
  +
  \left(F(S^n_-) (\mathbf n \cdot \mathbf{u}^{n+1}_-), \sigma\right)_{\partial K_-},
\end{eqnarray*}

where $\partial K_{-} \dealcoloneq \{x\in \partial K, \mathbf{u}(x) \cdot \mathbf{n}<0\}$ denotes the inflow boundary and $\partial K_{+} \dealcoloneq \{\partial K \setminus \partial K_{-}\}$ is the outflow part of the boundary. The quantities $S_+,\mathbf{u}_+$ then correspond to the values of these variables on the present cell, whereas $S_-,\mathbf{u}_-$ (needed on the inflow part of the boundary of $K$) are quantities taken from the neighboring cell. Some more context on discontinuous element techniques and evaluation of fluxes can also be found in step-12.

    Linear solvers

The linear solvers used in this program are a straightforward extension of the ones used in step-20 (but without LinearOperator). Essentially, we simply have to extend everything from two to three solution components. If we use the discrete spaces mentioned above and put shape functions into the bilinear forms, we arrive at the following linear system to be solved for time step $n+1$:

\[
\left(
\begin{array}{ccc}
M^u(S^n) & B^T & 0 \\
B & 0 & 0 \\
\triangle t\, H & 0 & M^S
\end{array}
\right)
\left(
\begin{array}{c}
\mathbf{U}^{n+1} \\ P^{n+1} \\ S^{n+1}
\end{array}
\right)
=
\left(
\begin{array}{c}
F_1 \\ F_2 \\ F_3
\end{array}
\right)
\]

where the individual matrices and vectors are defined as follows using shape functions $\mathbf v_i$ (of type Raviart-Thomas $RT_k$) for velocities and $\phi_i$ (of type $DGQ_k$) for both pressures and saturations:

\begin{eqnarray*}
M^u(S^n)_{ij} &=&
\left((\mathbf{K}\lambda(S^n))^{-1} \mathbf{v}_i,\mathbf{v}_j\right)_\Omega,
\\
(F_3)_i &=&
(S^n,\phi_i)_\Omega +\triangle t \sum_K  \left(F(S^n) q^{n+1}, \phi_i\right)_K.
\end{eqnarray*}

Note
Due to historical accidents, the role of matrices $B$ and $B^T$ has been reversed in this program compared to step-20. In other words, here $B$ refers to the divergence and $B^T$ to the gradient operators when it was the other way around in step-20.

    The system above presents a complication: Since the matrix $H_{ij}$ depends on $\mathbf u^{n+1}$ implicitly (the velocities are needed to determine which parts of the boundaries $\partial K$ of cells are influx or outflux parts), we can only assemble this matrix after we have solved for the velocities.

    The solution scheme then involves the following steps:

1. Solve for the pressure $p^{n+1}$ using the Schur complement technique introduced in step-20.
2. Solve for the velocity $\mathbf u^{n+1}$ as also discussed in step-20.
3. Compute the term $F_3$, using the just computed velocities.
4. Solve for the saturation $S^{n+1}$.

For simplicity, this program assumes that there is no source, $q=0$, and that the heterogeneous porous medium is isotropic $\mathbf{K}(\mathbf{x}) = k(\mathbf{x}) \mathbf{I}$. The first one of these is a realistic assumption in oil reservoirs: apart from injection and production wells, there are usually no mechanisms for fluids to appear or disappear out of the blue. The second one is harder to justify: on a microscopic level, most rocks are isotropic, because they consist of a network of interconnected pores. However, this microscopic scale is out of the range of today's computer simulations, and we have to be content with simulating things on the scale of meters. On that scale, however, fluid transport typically happens through a network of cracks in the rock, rather than through pores. However, cracks often result from external stress fields in the rock layer (for example from tectonic faulting) and the cracks are therefore roughly aligned. This leads to a situation where the permeability is often orders of magnitude larger in the direction parallel to the cracks than perpendicular to the cracks. A problem one typically faces in reservoir simulation, however, is that the modeler doesn't know the direction of cracks because oil reservoirs are not accessible to easy inspection. The only solution in that case is to assume an effective, isotropic permeability.

      Whatever the matter, both of these restrictions, no sources and isotropy, would be easy to lift with a few lines of code in the program.

Next, for simplicity, our numerical simulation will be done on the unit cell $\Omega = [0,1]\times [0,1]$ for $t\in [0,T]$. Our initial conditions are $S(\mathbf{x},0)=0$; in the oil reservoir picture, where $S$ would indicate the water saturation, this means that the reservoir contains pure oil at the beginning. Note that we do not need any initial conditions for pressure or velocity, since the equations do not contain time derivatives of these variables. Finally, we impose the following pressure boundary conditions:

      \[
   p(\mathbf{x},t)=1-x_1 \qquad \textrm{on}\ \partial\Omega.
\]

      Note
Coming back to this testcase in step-43 several years later revealed an oddity in the setup of this testcase. To this end, consider that we can rewrite the advection equation for the saturation as $S_{t} + (\mathbf{u} F'(S)) \cdot \nabla S = 0$. Now, at the initial time, we have $S=0$, and with the given choice of function $F(S)$, we happen to have $F'(0)=0$. In other words, at $t=0$, the equation reduces to $S_t=0$ for all $\mathbf x$, so the saturation is zero everywhere and it is going to stay zero everywhere! This is despite the fact that $\mathbf u$ is not necessarily zero: the combined fluid is moving, but we've chosen our partial flux $F(S)$ in such a way that infinitesimal amounts of wetting fluid also only move at infinitesimal speeds (i.e., they stick to the medium more than the non-wetting phase in which they are embedded). That said, how can we square this with the knowledge that wetting fluid is invading from the left, leading to the flow patterns seen in the results section? That's where we get into mathematics: Equations like the transport equation we are considering here have infinitely many solutions, but only one of them is physical: the one that results from the so-called viscosity limit, called the viscosity solution. The thing is that with discontinuous elements we arrive at this viscosity limit because using a numerical flux introduces a finite amount of artificial viscosity into the numerical scheme. On the other hand, in step-43, we use an artificial viscosity that is proportional to $\|\mathbf u F'(S)\|$ on every cell, which at the initial time is zero. Thus, the saturation there is zero and remains zero; the solution we then get is one solution of the advection equation, but the method does not converge to the viscosity solution without further changes. We will therefore use a different initial condition in that program.

      Finally, to come back to the description of the testcase, we will show results for computations with the two permeability functions introduced at the end of the results section of step-20:

      • A function that models a single, winding crack that snakes through the domain. In analogy to step-20, but taking care of the slightly different geometry we have here, we describe this by the following function:

\begin{eqnarray*}
  k(\mathbf x)
  =
  \max \left\{ e^{-\left(\frac{x_2-\frac{1}{2}-0.1\sin(10 x_1)}{0.1}\right)^2}, 0.001 \right\}.
\end{eqnarray*}

• A function that models a random medium,

  \begin{eqnarray*}
    k(\mathbf x)
    =
    \min \left\{ \max \left\{ \sum_{i=1}^N e^{-\left(\frac{|\mathbf{x}-\mathbf{x}_i|}{0.05}\right)^2}, 0.01 \right\}, 4 \right\},
  \end{eqnarray*}

  where the centers $\mathbf{x}_i$ are $N$ randomly chosen locations inside the domain. This function models a domain in which there are $N$ centers of higher permeability (for example where rock has cracked) embedded in a matrix of more pristine, unperturbed background rock. Note that here we have cut off the permeability function both above and below to ensure a bounded condition number.

      The commented program

This program is an adaptation of step-20 and includes some techniques of DG methods from step-12. A good part of the program is therefore very similar to step-20 and we will not comment again on these parts. Only the new stuff will be discussed in more detail.

    2. project_back_saturation resets all saturation degrees of freedom with values less than zero to zero, and all those with saturations greater than one to one.
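   A minimal sketch of what this clamping might look like, assuming the saturation unknowns form block 2 of the solution BlockVector (the block layout is an assumption here, not something the excerpts above establish):

     template <int dim>
     void TwoPhaseFlowProblem<dim>::project_back_saturation()
     {
       // Clamp every saturation degree of freedom to the physically
       // meaningful range [0,1]:
       for (unsigned int i = 0; i < solution.block(2).size(); ++i)
         if (solution.block(2)(i) < 0)
           solution.block(2)(i) = 0;
         else if (solution.block(2)(i) > 1)
           solution.block(2)(i) = 1;
     }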
The rest of the class should be pretty much obvious. The viscosity variable stores the viscosity $\mu$ that enters several of the formulas in the nonlinear equations. The variable time keeps track of the time information within the simulation.

        template <int dim>
        class TwoPhaseFlowProblem
        {

      TwoPhaseFlowProblem class implementation

Here now the implementation of the main class. Much of it is actually copied from step-20, so we won't comment on it in much detail. You should try to get familiar with that program first; most of what is happening here should then be clear.

      TwoPhaseFlowProblem::TwoPhaseFlowProblem

First for the constructor. We use $RT_k \times DQ_k \times DQ_k$ spaces. For initializing the DiscreteTime object, we don't set the time step size in the constructor because we don't have its value yet. The time step size is initially set to zero, but it will be computed before it is needed to increment time, as described in a subsection of the introduction. The time object internally prevents itself from being incremented when $dt = 0$, forcing us to set a non-zero desired size for $dt$ before advancing time.
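A sketch of the resulting usage pattern, using the DiscreteTime interface described above; compute_time_step_size() is a hypothetical helper standing in for the velocity-based step size computation from the introduction, and the end time of 1 is likewise an assumption:

  DiscreteTime time(/*start time*/ 0., /*end time*/ 1.);
  while (time.is_at_end() == false)
    {
      // ...assemble and solve for the velocity and pressure...

      // The step size starts out as zero, so a nonzero desired step
      // size must be set before time can be advanced:
      time.set_desired_next_step_size(compute_time_step_size());

      // ...assemble and solve the saturation equation, scaling the
      // time-dependent terms by time.get_next_step_size()...

      time.advance_time();
    }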

        template <int dim>
        TwoPhaseFlowProblem<dim>::TwoPhaseFlowProblem(const unsigned int degree)
        : degree(degree)
        fe_values.get_function_values(old_solution, old_solution_values);
        fe_values.get_function_values(solution, present_solution_values);
       
First for the cell terms. These are, following the formulas in the introduction, $(S^n,\sigma)-(F(S^n) \mathbf{v}^{n+1},\nabla \sigma)$, where $\sigma$ is the saturation component of the test function:

        for (unsigned int q = 0; q < n_q_points; ++q)
        for (unsigned int i = 0; i < dofs_per_cell; ++i)
        {

      The main function

That's it. In the main function, we pass the degree of the finite element space to the constructor of the TwoPhaseFlowProblem object. Here, we use zero-th degree elements, i.e. $RT_0\times DQ_0 \times DQ_0$. The rest is as in all the other programs.

        int main()
        {
        try
      ...

      As we can see, the time step is pretty much constant right from the start, which indicates that the velocities in the domain are not strongly dependent on changes in saturation, although they certainly are through the factor $\lambda(S)$ in the pressure equation.

      Our second observation is that the number of CG iterations needed to solve the pressure Schur complement equation drops from 22 to 17 between the first and the second time step (in fact, it remains around 17 for the rest of the computations). The reason is actually simple: Before we solve for the pressure during a time step, we don't reset the solution variable to zero. The pressure (and the other variables) therefore have the previous time step's values at the time we get into the CG solver. Since the velocities and pressures don't change very much as computations progress, the previous time step's pressure is actually a good initial guess for this time step's pressure. Consequently, the number of iterations we need once we have computed the pressure once is significantly reduced.

The final observation concerns the number of iterations needed to solve for the saturation, i.e. one. This shouldn't surprise us too much: the matrix we have to solve with is the mass matrix. However, this is the mass matrix for the $DGQ_0$ element of piecewise constants where no element couples with the degrees of freedom on neighboring cells. The matrix is therefore a diagonal one, and it is clear that we should be able to invert this matrix in a single CG iteration.

      With all this, here are a few movies that show how the saturation progresses over time. First, this is for the single crack model, as implemented in the SingleCurvingCrack::KInverse class:

As can be seen, the water rich fluid snakes its way mostly along the high-permeability zone in the middle of the domain, whereas the rest of the domain is mostly impermeable. This and the next movie are generated using n_refinement_steps=7, leading to a $128\times 128$ mesh with some 16,000 cells and about 66,000 unknowns in total.

      The second movie shows the saturation for the random medium model of class RandomMedium::KInverse, where we have randomly distributed centers of high permeability and fluid hops from one of these zones to the next:

      Finally, here is the same situation in three space dimensions, on a mesh with n_refinement_steps=5, which produces a mesh of some 32,000 cells and 167,000 degrees of freedom:


      To repeat these computations, all you have to do is to change the line

      TwoPhaseFlowProblem<2> two_phase_flow_problem(0);

      in the main function to

      TwoPhaseFlowProblem<3> two_phase_flow_problem(0);

      The visualization uses a cloud technique, where the saturation is indicated by colored but transparent clouds for each cell. This way, one can also see somewhat what happens deep inside the domain. A different way of visualizing would have been to show isosurfaces of the saturation evolving over time. There are techniques to plot isosurfaces transparently, so that one can see several of them at the same time like the layers of an onion.

So why don't we show such isosurfaces? The problem lies in the way isosurfaces are computed: they require that the field to be visualized is continuous, so that the isosurfaces can be generated by following contours at least across a single cell. However, our saturation field is piecewise constant and discontinuous. If we wanted to plot an isosurface for a saturation $S=0.5$, chances would be that there is no single point in the domain where that saturation is actually attained. If we had to define isosurfaces in that context at all, we would have to take the interfaces between cells, where one of the two adjacent cells has a saturation greater than and the other cell a saturation less than 0.5. However, it appears that most visualization programs are not equipped to do this kind of transformation.

      Possibilities for extensions

      There are a number of areas where this program can be improved. Three of them are listed below. All of them are, in fact, addressed in a tutorial program that forms the continuation of the current one: step-43.

      Solvers

At present, the program is not particularly fast: the 2d random medium computation took about a day for the 1,000 or so time steps. The corresponding 3d computation took almost two days for 800 time steps. The reason why it isn't faster than this is twofold. First, we rebuild the entire matrix in every time step, although some parts such as the $B$, $B^T$, and $M^S$ blocks never change.

Second, we could do a lot better with the solver and preconditioners. Presently, we solve the Schur complement $B^TM^u(S)^{-1}B$ with a CG method, using $[B^T (\textrm{diag}(M^u(S)))^{-1} B]^{-1}$ as a preconditioner. Applying this preconditioner is expensive, since it involves solving a linear system each time. This may have been appropriate for step-20, where we have to solve the entire problem only once. However, here we have to solve it hundreds of times, and in such cases it is worth considering a preconditioner that is more expensive to set up the first time, but cheaper to apply later on.

One possibility would be to realize that the matrix we use as preconditioner, $B^T (\textrm{diag}(M^u(S)))^{-1} B$, is still sparse, and symmetric on top of that. If one looks at the flow field evolve over time, we also see that while $S$ changes significantly over time, the pressure hardly does and consequently $B^T (\textrm{diag}(M^u(S)))^{-1} B \approx B^T (\textrm{diag}(M^u(S^0)))^{-1} B$. In other words, the matrix for the first time step should be a good preconditioner also for all later time steps. With a bit of back-and-forthing, it isn't hard to actually get a representation of it as a SparseMatrix object. We could then hand it off to the SparseMIC class to form a sparse incomplete Cholesky decomposition. To form this decomposition is expensive, but we have to do it only once in the first time step, and can then use it as a cheap preconditioner in the future. We could do better even by using the SparseDirectUMFPACK class that produces not only an incomplete, but a complete decomposition of the matrix, which should yield an even better preconditioner.

Finally, why use the approximation $B^T (\textrm{diag}(M^u(S)))^{-1} B$ to precondition $B^T M^u(S)^{-1} B$? The latter matrix, after all, is the mixed form of the Laplace operator on the pressure space, for which we use linear elements. We could therefore build a separate matrix $A^p$ on the side that directly corresponds to the non-mixed formulation of the Laplacian, for example using the bilinear form $(\mathbf{K}\lambda(S^n) \nabla \varphi_i,\nabla\varphi_j)$. We could then form an incomplete or complete decomposition of this non-mixed matrix and use it as a preconditioner of the mixed form.
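To make the factor-once-apply-often idea concrete, here is a sketch of how the one-time factorization could be reused; the object names (approx_schur_matrix, schur_complement, pressure_solution, schur_rhs) are placeholders, not objects from the program above:

  SparseDirectUMFPACK preconditioner;
  preconditioner.initialize(approx_schur_matrix); // expensive, but done only once

  SolverControl solver_control(1000, 1e-8 * schur_rhs.l2_norm());
  SolverCG<Vector<double>> cg(solver_control);

  // Every later time step can then reuse the stored factorization as
  // a preconditioner for the Schur complement solve:
  cg.solve(schur_complement, pressure_solution, schur_rhs, preconditioner);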

/usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html	2024-12-27 18:25:18.892943008 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html	2024-12-27 18:25:18.896943036 +0000

This material is based upon work partly supported by the National Science Foundation under Award No. EAR-0426271 and The California Institute of Technology. Any opinions, findings, and conclusions or recommendations expressed in this publication are those of the author and do not necessarily reflect the views of the National Science Foundation or of The California Institute of Technology.

      Introduction

      This program deals with the Stokes system of equations which reads as follows in non-dimensionalized form:

\begin{eqnarray*}
  -2\; \textrm{div}\; \varepsilon(\textbf{u}) + \nabla p &=& \textbf{f},
  \\
  -\textrm{div}\; \textbf{u} &=& 0,
\end{eqnarray*}

where $\textbf u$ denotes the velocity of a fluid, $p$ is its pressure, $\textbf f$ are external forces, and $\varepsilon(\textbf{u})= \nabla^s{\textbf{u}}= \frac 12 \left[(\nabla \textbf{u}) + (\nabla \textbf{u})^T\right]$ is the rank-2 tensor of symmetrized gradients; a component-wise definition of it is $\varepsilon(\textbf{u})_{ij}=\frac 12\left(\frac{\partial u_i}{\partial x_j} + \frac{\partial u_j}{\partial x_i}\right)$.

      The Stokes equations describe the steady-state motion of a slow-moving, viscous fluid such as honey, rocks in the earth mantle, or other cases where inertia does not play a significant role. If a fluid is moving fast enough that inertia forces are significant compared to viscous friction, the Stokes equations are no longer valid; taking into account inertia effects then leads to the nonlinear Navier-Stokes equations. However, in this tutorial program, we will focus on the simpler Stokes system.

      Note that when deriving the more general compressible Navier-Stokes equations, the diffusion is modeled as the divergence of the stress tensor

\begin{eqnarray*}
  \tau = - \mu \left(2\varepsilon(\textbf{u}) - \frac{2}{3}\nabla \cdot \textbf{u} I\right),
\end{eqnarray*}

where $\mu$ is the viscosity of the fluid. With the assumption of $\mu=1$ (assume constant viscosity and non-dimensionalize the equation by dividing out $\mu$) and assuming incompressibility ( $\textrm{div}\; \textbf{u}=0$), we arrive at the formulation from above:

\begin{eqnarray*}
  \textrm{div}\; \tau = -2\textrm{div}\;\varepsilon(\textbf{u}).
\end{eqnarray*}

A different formulation uses the Laplace operator ( $-\triangle \textbf{u}$) instead of the symmetrized gradient. A big difference here is that the different components of the velocity do not couple. If you assume additional regularity of the solution $\textbf{u}$ (second partial derivatives exist and are continuous), the formulations are equivalent:

\begin{eqnarray*}
  \textrm{div}\; \tau
  = -2\textrm{div}\;\varepsilon(\textbf{u})
  = -\triangle \textbf{u} - \nabla \cdot (\nabla\textbf{u})^T
  = -\triangle \textbf{u}.
\end{eqnarray*}

This is because the $i$th entry of $\nabla \cdot (\nabla\textbf{u})^T$ is given by:

\begin{eqnarray*}
 [\nabla \cdot (\nabla\textbf{u})^T]_i
 = \sum_j \frac{\partial}{\partial x_j} [(\nabla\textbf{u})^T]_{i,j}
 = \sum_j \frac{\partial}{\partial x_j} [(\nabla\textbf{u})]_{j,i}
 = \sum_j \frac{\partial}{\partial x_j} \frac{\partial u_j}{\partial x_i}
 = \frac{\partial}{\partial x_i}
   \underbrace{\textrm{div}\; \textbf{u}}_{=0}
 = 0.
\end{eqnarray*}

If you cannot assume the above-mentioned regularity, or if your viscosity is not a constant, the equivalence no longer holds. Therefore, we decided to stick with the more physically accurate symmetric tensor formulation in this tutorial.

      To be well-posed, we will have to add boundary conditions to the equations. What boundary conditions are readily possible here will become clear once we discuss the weak form of the equations.

The equations covered here fall into the class of vector-valued problems. A top-level overview of this subject can be found in the Handling vector valued problems topic.

      Weak form

      The weak form of the equations is obtained by writing it in vector form as

\begin{eqnarray*}
  \begin{pmatrix}
    {-2\; \textrm{div}\; \varepsilon(\textbf{u}) + \nabla p}
    \\
    {-\textrm{div}\; \textbf{u}}
  \end{pmatrix}
  =
  \begin{pmatrix}
  {\textbf{f}}
  \\
  0
  \end{pmatrix},
\end{eqnarray*}

forming the dot product from the left with a vector-valued test function $\phi = \begin{pmatrix}\textbf{v} \\ q\end{pmatrix}$ and integrating over the domain $\Omega$, yielding the following set of equations:

\begin{eqnarray*}
  (\mathrm v,
   -2\; \textrm{div}\; \varepsilon(\textbf{u}) + \nabla p)_{\Omega}
  -
  (q,\textrm{div}\; \textbf{u})_{\Omega}
  =
  (\textbf{v}, \textbf{f})_\Omega,
\end{eqnarray*}

which has to hold for all test functions $\phi = \begin{pmatrix}\textbf{v} \\ q\end{pmatrix}$.

      A generally good rule of thumb is that if one can reduce how many derivatives are taken on any variable in the formulation, then one should in fact do that using integration by parts. (This is motivated by the theory of partial differential equations, and in particular the difference between strong and weak solutions.) We have already done that for the Laplace equation, where we have integrated the second derivative by parts to obtain the weak formulation that has only one derivative on both test and trial function.

      In the current context, we integrate by parts the second term:

\begin{eqnarray*}
  (\textbf{v}, -2\; \textrm{div}\; \varepsilon(\textbf{u}))_{\Omega}
  - (\textrm{div}\; \textbf{v}, p)_{\Omega}
  + (\textbf{n}\cdot\textbf{v}, p)_{\partial\Omega}
  -
  (q,\textrm{div}\; \textbf{u})_{\Omega}
  =
  (\textbf{v}, \textbf{f})_\Omega.
\end{eqnarray*}

      Likewise, we integrate by parts the first term to obtain

\begin{eqnarray*}
  (\nabla \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\Omega}
  -
  (\textbf{n} \otimes \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\partial\Omega}
  - (\textrm{div}\; \textbf{v}, p)_{\Omega}
  + (\textbf{n}\cdot\textbf{v}, p)_{\partial\Omega}
  -
  (q,\textrm{div}\; \textbf{u})_{\Omega}
  =
  (\textbf{v}, \textbf{f})_\Omega,
\end{eqnarray*}

      where the scalar product between two tensor-valued quantities is here defined as

\begin{eqnarray*}
  (\nabla \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\Omega}
  =
  2 \int_\Omega \sum_{i,j=1}^d \frac{\partial v_j}{\partial x_i}
  \varepsilon(\textbf{u})_{ij} \ dx.
\end{eqnarray*}

Using this, we have now reduced the requirements on our variables to first derivatives for $\mathbf u,\mathbf v$ and no derivatives at all for $p,q$.

Because the scalar product between a general tensor like $\nabla\textbf{v}$ and a symmetric tensor like $\varepsilon(\textbf{u})$ equals the scalar product between the symmetrized forms of the two, we can also write the bilinear form above as follows:

\begin{eqnarray*}
  (\varepsilon(\textbf{v}), 2\; \varepsilon(\textbf{u}))_{\Omega}
  -
  (\textbf{n} \otimes \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\partial\Omega}
  - (\textrm{div}\; \textbf{v}, p)_{\Omega}
  + (\textbf{n}\cdot\textbf{v}, p)_{\partial\Omega}
  -
  (q,\textrm{div}\; \textbf{u})_{\Omega}
  =
  (\textbf{v}, \textbf{f})_\Omega,
\end{eqnarray*}
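The domain part of this bilinear form translates almost literally into a deal.II assembly loop. A minimal sketch, using FEValuesExtractors for the velocity and pressure components; the objects fe_values, local_matrix, dofs_per_cell, and n_q_points are assumed to be set up in the usual way and are not taken from the text above:

  const FEValuesExtractors::Vector velocities(0);
  const FEValuesExtractors::Scalar pressure(dim);

  for (unsigned int q = 0; q < n_q_points; ++q)
    for (unsigned int i = 0; i < dofs_per_cell; ++i)
      for (unsigned int j = 0; j < dofs_per_cell; ++j)
        local_matrix(i, j) +=
          (2 * (fe_values[velocities].symmetric_gradient(i, q) *
                fe_values[velocities].symmetric_gradient(j, q)) // (eps(v), 2 eps(u))
           - fe_values[velocities].divergence(i, q) *
               fe_values[pressure].value(j, q)                  // -(div v, p)
           - fe_values[pressure].value(i, q) *
               fe_values[velocities].divergence(j, q))          // -(q, div u)
          * fe_values.JxW(q);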

      We will deal with the boundary terms in the next section, but it is already clear from the domain terms

\begin{eqnarray*}
  (\varepsilon(\textbf{v}), 2\; \varepsilon(\textbf{u}))_{\Omega}
  - (\textrm{div}\; \textbf{v}, p)_{\Omega}
  -
  (q,\textrm{div}\; \textbf{u})_{\Omega}
\end{eqnarray*}

      of the bilinear form that the Stokes equations yield a symmetric bilinear form, and consequently a symmetric (if indefinite) system matrix.

      Boundary conditions

      Note
      The material presented here is also discussed in video lecture 21.5. (All video lectures are also available here.) (See also video lecture 21.55, video lecture 21.6, video lecture 21.65.)

      The weak form just derived immediately presents us with different possibilities for imposing boundary conditions:

1. Dirichlet velocity boundary conditions: On a part $\Gamma_D\subset\partial\Omega$ we may impose Dirichlet conditions on the velocity $\textbf u$:

   \begin{eqnarray*}
     \textbf u = \textbf g_D \qquad\qquad \textrm{on}\ \Gamma_D.
   \end{eqnarray*}

   Because test functions $\textbf{v}$ come from the tangent space of the solution variable, we have that $\textbf{v}=0$ on $\Gamma_D$ and consequently that

   \begin{eqnarray*}
     -(\textbf{n} \otimes \mathrm v, 2\; \varepsilon(\textbf{u}))_{\Gamma_D}
     +
     (\textbf{n}\cdot\textbf{v}, p)_{\Gamma_D}
     = 0.
   \end{eqnarray*}

        In other words, as usual, strongly imposed boundary values do not appear in the weak form.

        It is noteworthy that if we impose Dirichlet boundary values on the entire boundary, then the pressure is only determined up to a constant. An algorithmic realization of that would use similar tools as have been seen in step-11.

2. Neumann-type or natural boundary conditions: On the rest of the boundary $\Gamma_N=\partial\Omega\backslash\Gamma_D$, let us re-write the boundary terms as follows:

   \begin{eqnarray*}
     -(\textbf{n} \otimes \mathrm v, 2\; \varepsilon(\textbf{u}))_{\Gamma_N}
     +
     (\textbf{n}\cdot\textbf{v}, p)_{\Gamma_N}
     &=&
     (\textbf{v},
      \textbf{n}\cdot [p \textbf{I} - 2\; \varepsilon(\textbf{u})])_{\Gamma_N}.
   \end{eqnarray*}
/usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html	2024-12-27 18:25:18.956943448 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html	2024-12-27 18:25:18.964943503 +0000

        Introduction

        Note
        The material presented here is also discussed in video lecture 28. (All video lectures are also available here.)

        This is the first of a number of tutorial programs that will finally cover "real" time-dependent problems, not the slightly odd form of time dependence found in step-18 or the DAE model of step-21. In particular, this program introduces the wave equation in a bounded domain. Later, step-24 will consider an example of absorbing boundary conditions, and step-25 a kind of nonlinear wave equation producing solutions called solitons.

The wave equation in its prototypical form reads as follows: find $u(x,t), x\in\Omega, t\in[0,T]$ that satisfies

\begin{eqnarray*}
	\frac{\partial^2 u}{\partial t^2}
	-
	\Delta u &=& f
	\qquad
	\textrm{in}\ \Omega\times [0,T],
	\\
	u(x,t) &=& g
	\qquad
	\textrm{on}\ \partial\Omega\times [0,T],
	\\
	u(x,0) &=& u_0(x)
	\qquad
	\textrm{in}\ \Omega,
	\\
	\frac{\partial u(x,0)}{\partial t} &=& u_1(x)
	\qquad
	\textrm{in}\ \Omega.
\end{eqnarray*}

        Note that since this is an equation with second-order time derivatives, we need to pose two initial conditions, one for the value and one for the time derivative of the solution.

Physically, the equation describes the motion of an elastic medium. In 2-d, one can think of how a membrane moves if subjected to a force. The Dirichlet boundary conditions above indicate that the membrane is clamped at the boundary at a height $g(x,t)$ (this height might be moving as well; think of people holding a blanket and shaking it up and down). The first initial condition equals the initial deflection of the membrane, whereas the second one gives its velocity. For example, one could think of pushing the membrane down with a finger and then letting it go at $t=0$ (nonzero deflection but zero initial velocity), or hitting it with a hammer at $t=0$ (zero deflection but nonzero velocity). Both cases would induce motion in the membrane.

        Time discretization

        Method of lines or Rothe's method?

        There is a long-standing debate in the numerical analysis community over whether a discretization of time dependent equations should involve first discretizing the time variable leading to a stationary PDE at each time step that is then solved using standard finite element techniques (this is called the Rothe method), or whether one should first discretize the spatial variables, leading to a large system of ordinary differential equations that can then be handled by one of the usual ODE solvers (this is called the method of lines).


        Rothe's method!

        Given these considerations, here is how we will proceed: let us first define a simple time stepping method for this second order problem, and then in a second step do the spatial discretization, i.e. we will follow Rothe's approach.

        For the first step, let us take a little detour first: in order to discretize a second time derivative, we can either discretize it directly, or we can introduce an additional variable and transform the system into a first order system. In many cases, this turns out to be equivalent, but dealing with first order systems is often simpler. To this end, let us introduce

\[
	v = \frac{\partial u}{\partial t},
\]

        and call this variable the velocity for obvious reasons. We can then reformulate the original wave equation as follows:

\begin{eqnarray*}
	\frac{\partial u}{\partial t}
	-
	v
	&=& 0
	\qquad
	\textrm{in}\ \Omega\times [0,T],
	\\
	\frac{\partial v}{\partial t}
	-
	\Delta u &=& f
	\qquad
	\textrm{in}\ \Omega\times [0,T],
	\\
	u(x,t) &=& g
	\qquad
	\textrm{on}\ \partial\Omega\times [0,T],
	\\
	u(x,0) &=& u_0(x)
	\qquad
	\textrm{in}\ \Omega,
	\\
	v(x,0) &=& u_1(x)
	\qquad
	\textrm{in}\ \Omega.
\end{eqnarray*}

The advantage of this formulation is that it now only contains first time derivatives for both variables, for which it is simple to write down time stepping schemes. Note that we do not have boundary conditions for $v$ at first. However, we could enforce $v=\frac{\partial g}{\partial t}$ on the boundary. It turns out in numerical examples that this is actually necessary: without doing so the solution doesn't look particularly wrong, but the Crank-Nicolson scheme does not conserve energy if one doesn't enforce these boundary conditions.

With this formulation, let us introduce the following time discretization where a superscript $n$ indicates the number of a time step and $k=t_n-t_{n-1}$ is the length of the present time step:

\begin{eqnarray*}
  \frac{u^n - u^{n-1}}{k}
  - \left[\theta v^n + (1-\theta) v^{n-1}\right] &=& 0,
  \\
  \frac{v^n - v^{n-1}}{k}
  - \Delta\left[\theta u^n + (1-\theta) u^{n-1}\right]
  &=& \theta f^n + (1-\theta) f^{n-1}.
\end{eqnarray*}

Note how we introduced a parameter $\theta$ here. If we chose $\theta=0$, for example, the first equation would reduce to $\frac{u^n - u^{n-1}}{k}  - v^{n-1} = 0$, which is well-known as the forward or explicit Euler method. On the other hand, if we set $\theta=1$, then we would get $\frac{u^n - u^{n-1}}{k}  - v^n = 0$, which corresponds to the backward or implicit Euler method. Both these methods are first order accurate methods. They are simple to implement, but they are not really very accurate.

The third case would be to choose $\theta=\frac 12$. The first of the equations above would then read $\frac{u^n - u^{n-1}}{k} - \frac 12 \left[v^n + v^{n-1}\right] = 0$. This method is known as the Crank-Nicolson method and has the advantage that it is second order accurate. In addition, it has the nice property that it preserves the energy in the solution (physically, the energy is the sum of the kinetic energy of the particles in the membrane plus the potential energy present due to the fact that it is locally stretched; this quantity is a conserved one in the continuous equation, but most time stepping schemes do not conserve it after time discretization). Since $v^n$ also appears in the equation for $u^n$, the Crank-Nicolson scheme is also implicit.

        In the program, we will leave $\theta$ as a parameter, so that it will be easy to play with it. The results section will show some numerical evidence comparing the different schemes.

The equations above (called the semidiscretized equations because we have only discretized the time, but not space), can be simplified a bit by eliminating $v^n$ from the first equation and rearranging terms. We then get

\begin{eqnarray*}
  \left[ 1-k^2\theta^2\Delta \right] u^n &=&
         \left[ 1+k^2\theta(1-\theta)\Delta\right] u^{n-1} + k v^{n-1}
         + k^2\theta\left[\theta f^n + (1-\theta) f^{n-1}\right],\\
   v^n &=& v^{n-1} + k\Delta\left[ \theta u^n + (1-\theta) u^{n-1}\right]
   + k\left[\theta f^n + (1-\theta) f^{n-1}\right].
\end{eqnarray*}

In this form, we see that if we are given the solution $u^{n-1},v^{n-1}$ of the previous timestep, we can then solve for the variables $u^n,v^n$ separately, i.e. one at a time. This is convenient. In addition, we recognize that the operator in the first equation is positive definite, and the second equation looks particularly simple.

        Space discretization

We have now derived equations that relate the approximate (semi-discrete) solution $u^n(x)$ and its time derivative $v^n(x)$ at time $t_n$ with the solutions $u^{n-1}(x),v^{n-1}(x)$ of the previous time step at $t_{n-1}$. The next step is to also discretize the spatial variable using the usual finite element methodology. To this end, we multiply each equation with a test function, integrate over the entire domain, and integrate by parts where necessary. This leads to

\begin{eqnarray*}
  (u^n,\varphi) + k^2\theta^2(\nabla u^n,\nabla \varphi) &=&
  (u^{n-1},\varphi) - k^2\theta(1-\theta)(\nabla u^{n-1},\nabla \varphi)
  +
  k(v^{n-1},\varphi)
  +
  k^2\theta
  \left[
  \theta (f^n,\varphi) + (1-\theta) (f^{n-1},\varphi)
  \right],
  \\
  (v^n,\varphi) &=& (v^{n-1},\varphi)
  -
  k\left[ \theta (\nabla u^n,\nabla\varphi)
  + (1-\theta) (\nabla u^{n-1},\nabla \varphi)\right]
  +
  k
  \left[
  \theta (f^n,\varphi) + (1-\theta) (f^{n-1},\varphi)
  \right].
\end{eqnarray*}

It is then customary to approximate $u^n(x) \approx u^n_h(x) = \sum_i U_i^n\phi_i^n(x)$, where $\phi_i^n(x)$ are the shape functions used for the discretization of the $n$-th time step and $U_i^n$ are the unknown nodal values of the solution. Similarly, $v^n(x) \approx v^n_h(x) = \sum_i V_i^n\phi_i^n(x)$. Finally, we have the solutions of the previous time step, $u^{n-1}(x) \approx u^{n-1}_h(x) = \sum_i U_i^{n-1}\phi_i^{n-1}(x)$ and $v^{n-1}(x) \approx v^{n-1}_h(x) = \sum_i V_i^{n-1}\phi_i^{n-1}(x)$. Note that since the solution of the previous time step has already been computed by the time we get to time step $n$, $U^{n-1},V^{n-1}$ are known. Furthermore, note that the solutions of the previous step may have been computed on a different mesh, so we have to use shape functions $\phi^{n-1}_i(x)$.

If we plug these expansions into the above equations and test with the test functions from the present mesh, we get the following linear system:

\begin{eqnarray*}
  (M^n + k^2\theta^2 A^n)U^n &=&
  M^{n,n-1}U^{n-1} - k^2\theta(1-\theta) A^{n,n-1}U^{n-1}
  +
  kM^{n,n-1}V^{n-1}
  +
  k^2\theta
  \left[
  \theta F^n + (1-\theta) F^{n-1}
  \right],
  \\
  M^nV^n &=&
  M^{n,n-1}V^{n-1}
  -
  k\left[ \theta A^n U^n + (1-\theta) A^{n,n-1} U^{n-1}\right]
  +
  k
  \left[
  \theta F^n + (1-\theta) F^{n-1}
  \right],
\end{eqnarray*}

        where

\begin{eqnarray*}
	M^n_{ij} &=& (\phi_i^n, \phi_j^n),
	\\
	A^n_{ij} &=& (\nabla\phi_i^n, \nabla\phi_j^n),
	\\
	M^{n,n-1}_{ij} &=& (\phi_i^n, \phi_j^{n-1}),
	\\
	A^{n,n-1}_{ij} &=& (\nabla\phi_i^n, \nabla\phi_j^{n-1}),
	\\
	F^n_{i} &=& (f^n,\phi_i^n),
	\\
	F^{n-1}_{i} &=& (f^{n-1},\phi_i^n).
\end{eqnarray*}

        If we solve these two equations, we can move the solution one step forward and go on to the next time step.

It is worth noting that if we choose the same mesh on each time step (as we will in fact do in the program below), then we have the same shape functions on time step $n$ and $n-1$, i.e. $\phi^n_i=\phi_i^{n-1}=\phi_i$. Consequently, we get $M^n=M^{n,n-1}=M$ and $A^n=A^{n,n-1}=A$. On the other hand, if we had used different shape functions, then we would have to compute integrals that contain shape functions defined on two meshes. This is a somewhat messy process that we omit here, but that is treated in some detail in step-28.
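Because the mesh does not change, $M$ and $A$ can be built once and for all; a sketch using the MatrixCreator helper functions (the quadrature formula below is a typical choice, an assumption rather than something fixed by the text):

  // Assemble the mass and stiffness matrices once, since neither
  // depends on the time step when the mesh is fixed:
  MatrixCreator::create_mass_matrix(dof_handler,
                                    QGauss<dim>(fe.degree + 1),
                                    mass_matrix);
  MatrixCreator::create_laplace_matrix(dof_handler,
                                       QGauss<dim>(fe.degree + 1),
                                       laplace_matrix);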

        Under these conditions (i.e. a mesh that doesn't change), one can optimize the solution procedure a bit by basically eliminating the solution of the second linear system. We will discuss this in the introduction of the step-25 program.
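Putting the pieces together, the right hand side of the equation for $U^n$ can then be built from matrix-vector products alone. A sketch of one time step, dropping the body-force terms for brevity (the test case below uses $f=0$); the vector and matrix names mirror the notation above and are assumptions, not quoted code:

  // Right hand side: M U^{n-1} - k^2 theta (1-theta) A U^{n-1} + k M V^{n-1}
  mass_matrix.vmult(system_rhs, old_solution_u);
  laplace_matrix.vmult(tmp, old_solution_u);
  system_rhs.add(-time_step * time_step * theta * (1 - theta), tmp);
  mass_matrix.vmult(tmp, old_solution_v);
  system_rhs.add(time_step, tmp);

  // System matrix M + k^2 theta^2 A, then solve with CG:
  matrix_u.copy_from(mass_matrix);
  matrix_u.add(time_step * time_step * theta * theta, laplace_matrix);

  SolverControl solver_control(1000, 1e-8 * system_rhs.l2_norm());
  SolverCG<Vector<double>> cg(solver_control);
  cg.solve(matrix_u, solution_u, system_rhs, PreconditionIdentity());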

        Energy conservation

One way to compare the quality of a time stepping scheme is to see whether the numerical approximation preserves conservation properties of the continuous equation. For the wave equation, the natural quantity to look at is the energy. By multiplying the wave equation by $u_t$, integrating over $\Omega$, and integrating by parts where necessary, we find that

\[
	\frac{d}{d t}
	\left[\frac 12 \int_\Omega \left(\frac{\partial u}{\partial
	t}\right)^2 + (\nabla u)^2 \; dx\right]
	=
	\int_\Omega f \frac{\partial u}{\partial t} \; dx
	+
	\int_{\partial\Omega} n\cdot\nabla u
	\frac{\partial g}{\partial t} \; dx.
\]

By consequence, in the absence of body forces and with constant boundary values, we get that

\[
	E(t) = \frac 12 \int_\Omega \left(\frac{\partial u}{\partial
	t}\right)^2 + (\nabla u)^2 \; dx
\]

is a conserved quantity, i.e. one that doesn't change with time. We will compute this quantity after each time step. It is straightforward to see that if we replace $u$ by its finite element approximation, and $\frac{\partial u}{\partial t}$ by the finite element approximation of the velocity $v$, then

\[
	E(t_n) = \frac 12 \left<V^n, M^n V^n\right>
	+
	\frac 12 \left<U^n, A^n U^n\right>.
\]

        As we will see in the results section, the Crank-Nicolson scheme does indeed conserve the energy, whereas neither the forward nor the backward Euler scheme do.
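Evaluating this energy after each time step is then a one-liner given the matrices we already have, since SparseMatrix::matrix_norm_square() computes exactly the quadratic form $v^\top M v$; a sketch with the obvious vector names:

  const double energy =
    0.5 * mass_matrix.matrix_norm_square(solution_v) +
    0.5 * laplace_matrix.matrix_norm_square(solution_u);
  std::cout << "  Total energy: " << energy << std::endl;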

        Who are Courant, Friedrichs, and Lewy?

        One of the reasons why the wave equation is not easy to solve numerically is that explicit time discretizations are only stable if the time step is small enough. In particular, it is coupled to the spatial mesh width $h$. For the lowest order discretization we use here, the relationship reads

\[
	k\le \frac hc
\]

where $c$ is the wave speed, which in our formulation of the wave equation has been normalized to one. Consequently, unless we use the implicit schemes with $\theta>0$, our solutions will not be numerically stable if we violate this restriction. Implicit schemes do not have this restriction for stability, but they become inaccurate if the time step is too large.

        This condition was first recognized by Courant, Friedrichs, and Lewy — in 1928, long before computers became available for numerical computations! (This result appeared in the German language article R. Courant, K. Friedrichs and H. Lewy: Über die partiellen Differenzengleichungen der mathematischen Physik, Mathematische Annalen, vol. 100, no. 1, pages 32-74, 1928.) This condition on the time step is most frequently just referred to as the CFL condition. Intuitively, the CFL condition says that the time step must not be larger than the time it takes a wave to cross a single cell.

In the program, we will refine the square $[-1,1]^2$ seven times uniformly, giving a mesh size of $h=\frac 1{64}$, which is what we set the time step to. The fact that we set the time step and mesh size individually in two different places is error prone: it is too easy to refine the mesh once more but forget to also adjust the time step. step-24 shows a better way to keep these things in sync.
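One way to keep the two in sync is to derive the time step from the mesh itself, in the spirit of what step-24 does; a sketch, where wave_speed (equal to one here) is named explicitly only for clarity:

  // GridTools::minimal_cell_diameter() (from deal.II/grid/grid_tools.h)
  // returns the diameter of the smallest cell; for square cells this is
  // h*sqrt(dim), so divide by sqrt(dim) to recover the mesh width h:
  const double h = GridTools::minimal_cell_diameter(triangulation) /
                   std::sqrt(1. * dim);
  const double time_step = h / wave_speed;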

        The test case

Although the program has all the hooks to deal with nonzero initial and boundary conditions and body forces, we take a simple case where the domain is a square $[-1,1]^2$ and

\begin{eqnarray*}
	f &=& 0,
	\\
	u_0 &=& 0,
	\\
	u_1 &=& 0,
	\\
	g &=& \left\{\begin{matrix}\sin (4\pi t)
	&\qquad& \text{for}\ t\le \frac 12, x=-1, -\frac 13<y<\frac 13
	\\
	0
	&&\text{otherwise}
	\end{matrix}
	\right.
\end{eqnarray*}
/usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html	2024-12-27 18:25:19.012943832 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html	2024-12-27 18:25:19.012943832 +0000

        The problem

        The temperature at a given location, neglecting thermal diffusion, can be stated as

\[
	\rho C_p \frac{\partial}{\partial t}T(t,\mathbf r) = H(t,\mathbf r)
\]

Here $\rho (\mathbf r) $ is the density; $C_p (\mathbf r) $ is the specific heat; $\frac{\partial T}{\partial t}(t,\mathbf r)$ is the temperature rise due to the delivered microwave energy; and $H(t,\mathbf r)$ is the heating function defined as the thermal energy per time and volume transformed from deposited microwave energy.

Let us assume that tissues have heterogeneous dielectric properties but homogeneous acoustic properties. The basic acoustic generation equation in an acoustically homogeneous medium can be described as follows: if $u$ is the vector-valued displacement, then tissue certainly reacts to changes in pressure by acceleration:

\[
	\rho \frac{\partial^2}{\partial t^2}u(t,\mathbf r) =
	-\nabla p(t,\mathbf r).
\]

        Furthermore, it contracts due to excess pressure and expands based on changes in temperature:

\[
	\nabla \cdot u(t,\mathbf r) = -\frac{p(t,\mathbf r)}{\rho c_0^2}+\beta T(t,\mathbf r) .
\]

        Here, $\beta$ is a thermoexpansion coefficient.

Let us now make the assumption that heating only happens on a time scale much shorter than wave propagation through tissue (i.e. the temporal length of the microwave pulse that heats the tissue is much shorter than the time it takes a wave to cross the domain). In that case, the heating rate $H(t,\mathbf r)$ can be written as $H(t,\mathbf r) = a(\mathbf r)\delta(t)$ (where $a(\mathbf r)$ is a map of absorption strengths for microwave energy and $\delta(t)$ is the Dirac delta function), which together with the first equation above will yield an instantaneous jump in the temperature $T(\mathbf r)$ at time $t=0$. Using this assumption, and taking all equations together, we can rewrite and combine the above as follows:

\[
	\Delta p-\frac{1}{c_0^2} \frac{\partial^2 p}{\partial t^2} = \lambda
	a(\mathbf r)\frac{d\delta(t)}{dt}
\]

where $\lambda = - \frac{\beta}{C_p}$.

        This somewhat strange equation with the derivative of a Dirac delta function on the right hand side can be rewritten as an initial value problem as follows:

\begin{eqnarray*}
	\Delta \bar{p}- \frac{1}{c_0^2} \frac{\partial^2 \bar{p}}{\partial t^2} & = &
	0 \\
	\bar{p}(0,\mathbf r) &=& c_0^2 \lambda a(\mathbf r) = b(\mathbf r)  \\
	\frac{\partial\bar{p}(0,\mathbf r)}{\partial t} &=& 0.
\end{eqnarray*}

        (A derivation of this transformation into an initial value problem is given at the end of this introduction as an appendix.)

In the inverse problem, it is the initial condition $b(\mathbf r) = c_0^2 \lambda a(\mathbf r)$ that one would like to recover, since it is a map of absorption strengths for microwave energy, and therefore presumably an indicator to discern healthy from diseased tissue.

In real applications, the thermoacoustic source is very small compared to the medium, so the propagation path of the thermoacoustic waves can be approximated as going from the source to infinity. Furthermore, detectors are only a limited distance from the source, and one only needs to evaluate the values when the thermoacoustic waves pass through the detectors, although the waves continue beyond them. This is therefore a problem where we are only interested in a small part of an infinite medium, and we do not want waves generated somewhere to be reflected at the boundary of the domain which we consider interesting. Rather, we would like to simulate only that part of the wave field that is contained inside the domain of interest, and waves that hit the boundary of that domain to simply pass undisturbed through the boundary. In other words, we would like the boundary to absorb any waves that hit it.

        In general, this is a hard problem: Good absorbing boundary conditions are nonlinear and/or numerically very expensive. We therefore opt for a simple first order approximation to absorbing boundary conditions that reads

\[
 \frac{\partial\bar{p}}{\partial\mathbf n} =
 -\frac{1}{c_0} \frac{\partial\bar{p}}{\partial t}
\]

Here, $\frac{\partial\bar{p}}{\partial\mathbf n}$ is the normal derivative at the boundary. It should be noted that this is not a particularly good boundary condition, but it is one of the very few that are reasonably simple to implement.

        Weak form and discretization

        As in step-23, one first introduces a second variable, which is defined as the derivative of the pressure potential:

\[
 v = \frac{\partial\bar{p}}{\partial t}
\]

        With the second variable, one then transforms the forward problem into two separate equations:

\begin{eqnarray*}
 \bar{p}_{t} - v & = & 0 \\
 \Delta\bar{p} - \frac{1}{c_0^2}\,v_{t} & = & f
\end{eqnarray*}

        with initial conditions:

\begin{eqnarray*}
 \bar{p}(0,\mathbf r) & = & b(\mathbf r) \\
 v(0,\mathbf r)=\bar{p}_t(0,\mathbf r) & = & 0.
\end{eqnarray*}

Note that we have introduced a right hand side $f(t,\mathbf r)$ here to show how to derive these formulas in the general case, although in the application to the thermoacoustic problem $f=0$.

The semi-discretized, weak version of this model, using the general $\theta$ scheme introduced in step-23, is then:

\begin{eqnarray*}
 \left(\frac{\bar{p}^n-\bar{p}^{n-1}}{k},\phi\right)_\Omega-
 \left(\theta v^{n}+(1-\theta)v^{n-1},\phi\right)_\Omega & = & 0   \\
 -\left(\nabla((\theta\bar{p}^n+(1-\theta)\bar{p}^{n-1})),\nabla\phi\right)_\Omega-
 \frac{1}{c_0}\left(\frac{\bar{p}^n-\bar{p}^{n-1}}{k},\phi\right)_{\partial\Omega} -
 \frac{1}{c_0^2}\left(\frac{v^n-v^{n-1}}{k},\phi\right)_\Omega & =
 & \left(\theta f^{n}+(1-\theta)f^{n-1}, \phi\right)_\Omega,
\end{eqnarray*}

        where $\phi$ is an arbitrary test function, and where we have used the absorbing boundary condition to integrate by parts: absorbing boundary conditions are incorporated into the weak form by using

\[
 \int_\Omega\varphi \, \Delta p\; dx =
 -\int_\Omega\nabla \varphi \cdot \nabla p \; dx +
 \int_{\partial\Omega}\varphi \frac{\partial p}{\partial {\mathbf n}}\; ds.
\]

        From this we obtain the discrete model by introducing a finite number of shape functions, and get

\begin{eqnarray*}
 M\bar{p}^{n}-k \theta M v^n & = & M\bar{p}^{n-1}+k (1-\theta)Mv^{n-1},\\
 (-c_0^2k \theta A-c_0 B)\bar{p}^n-Mv^{n} & = &
 (c_0^2k(1-\theta)A-c_0B)\bar{p}^{n-1}-Mv^{n-1}+c_0^2k(\theta F^{n}+(1-\theta)F^{n-1}).
\end{eqnarray*}

The matrices $M$ and $A$ are here as in step-23, and the boundary mass matrix

\[
 B_{ij} = \left(\varphi_i,\varphi_j\right)_{\partial\Omega}
\]

        results from the use of absorbing boundary conditions.

The two equations above can be rewritten in matrix form with the pressure and its derivative as an unknown vector:

\[
 \left(\begin{array}{cc}
  M         &       -k\theta M \\
  c_0^2\,k\,\theta\,A+c_0\,B  &  M
 \end{array}\right)
 \left(\begin{array}{c}
  \bar{p}^{n} \\
  v^{n}
 \end{array}\right)
 =
 \left(\begin{array}{c}
  G_1  \\
  G_2 -(\theta F^{n}+(1-\theta)F^{n-1})c_{0}^{2}k
 \end{array}\right)
\]

        where

\[
 \left(\begin{array}{c}
  G_1 \\
  G_2
 \end{array}\right)
 =
 \left(\begin{array}{l}
  M\bar{p}^{n-1}+k(1-\theta)Mv^{n-1}\\
  (-c_{0}^{2}k (1-\theta)A+c_0 B)\bar{p}^{n-1} +Mv^{n-1}
 \end{array}\right)
\]

        By simple transformations, one then obtains two equations for the pressure potential and its derivative, just as in the previous tutorial program:

\begin{eqnarray*}
 (M+(k\,\theta\,c_{0})^{2}A+c_0k\theta B)\bar{p}^{n} & = &
 G_{1}+(k\, \theta)G_{2}-(c_0k)^2\theta (\theta F^{n}+(1-\theta)F^{n-1}) \\
 Mv^n & = & -(c_0^2\,k\, \theta\, A+c_0B)\bar{p}^{n}+ G_2 -
 c_0^2k(\theta F^{n}+(1-\theta)F^{n-1})
\end{eqnarray*}

        What the program does

Compared to step-23, this program adds the treatment of simple absorbing boundary conditions. In addition, it deals with data obtained from actual experimental measurements. To this end, we need to evaluate the solution at points at which the experiment also evaluates a real pressure field. We will see how to do that using the VectorTools::point_value function further down below.
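As a rough sketch of what such a point evaluation looks like (the point coordinates and variable names here are made up for illustration; they are not necessarily the ones used in the program), one can write:

  const Point<2> detector_location(0.5, 0.2);
  const double pressure_at_detector =
    VectorTools::point_value(dof_handler, solution, detector_location);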

        Appendix: PDEs with Dirac delta functions as right hand side and their transformation to an initial value problem

        In the derivation of the initial value problem for the wave equation, we initially found that the equation had the derivative of a Dirac delta function as a right hand side:

\[
 \Delta p-\frac{1}{c_0^2} \frac{\partial^2 p}{\partial t^2} = \lambda
 a(\mathbf r)\frac{d\delta(t)}{dt}.
\]

In order to see how to transform this single equation into the usual statement of a PDE with initial conditions, let us assume that the medium is at rest initially (a physically quite reasonable assumption), i.e. $p(t,\mathbf r)=\frac{\partial p(t,\mathbf r)}{\partial t}=0$ for $t<0$. Next, let us form the indefinite integral with respect to time of both sides:

\[
 \int^t \Delta p\; dt -\int^t \frac{1}{c_0^2} \frac{\partial^2 p}{\partial t^2}
 \; dt
 =
 \int^t \lambda a(\mathbf r)\frac{d\delta(t)}{dt} \;dt.
\]

        This immediately leads to the statement

\[
 P(t,\mathbf r) - \frac{1}{c_0^2} \frac{\partial p}{\partial t}
 =
 \lambda a(\mathbf r) \delta(t),
\]

/usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html differs (HTML document, UTF-8 Unicode text, with very long lines)

        Discretization of the equations in time

Now, we can discretize the split formulation in time using the $\theta$-method, which has a stencil of only two time steps. By choosing a $\theta\in [0,1]$, the latter discretization allows us to choose from a continuum of schemes. In particular, if we pick $\theta=0$ or $\theta=1$, we obtain the first-order accurate explicit or implicit Euler method, respectively. Another important choice is $\theta=\frac{1}{2}$, which gives the second-order accurate Crank-Nicolson scheme. Henceforth, a superscript $n$ denotes the values of the variables at the $n^{\mathrm{th}}$ time step, i.e. at $t=t_n \dealcoloneq n k$, where $k$ is the (fixed) time step size. Thus, the split formulation of the time-discretized sine-Gordon equation becomes

        \begin{eqnarray*}
   \frac{u^n - u^{n-1}}{k} - \left[\theta v^n + (1-\theta) v^{n-1}\right] &=& 0,\\
   \frac{v^n - v^{n-1}}{k} - \Delta\left[\theta u^n + (1-\theta) u^{n-1}\right]
   &=& -\sin\left[\theta u^n + (1-\theta) u^{n-1}\right].
 \end{eqnarray*}

We can simplify the latter via a bit of algebra. Eliminating $v^n$ from the first equation and rearranging, we obtain

\begin{eqnarray*}
  \left[ 1-k^2\theta^2\Delta \right] u^n &=&
         \left[ 1+k^2\theta(1-\theta)\Delta\right] u^{n-1} + k v^{n-1}
         - k^2\theta\sin\left[ \theta u^n + (1-\theta) u^{n-1} \right],\\
  v^n &=& v^{n-1} + k\Delta\left[ \theta u^n + (1-\theta) u^{n-1} \right]
         - k\sin\left[ \theta u^n + (1-\theta) u^{n-1} \right].
\end{eqnarray*}

It may seem as though we can just proceed to discretize the equations in space at this point. While this is true for the second equation (which is linear in $v^n$), this would not work for all $\theta$ since the first equation above is nonlinear. Therefore, a nonlinear solver must be implemented, then the equations can be discretized in space and solved.

To this end, we can use Newton's method. Given the nonlinear equation $F(u^n) = 0$, we produce successive approximations to $u^n$ as follows:

        \begin{eqnarray*}
   \mbox{ Find } \delta u^n_l \mbox{ s.t. } F'(u^n_l)\delta u^n_l = -F(u^n_l)
   \mbox{, set }  u^n_{l+1} = u^n_l + \delta u^n_l.
\end{eqnarray*}

Notice that while $F(u^n_l)$ is a function, $F'(u^n_l)$ is an operator.

        Weak formulation of the time-discretized equations

With hindsight, we choose both the solution and the test space to be $H^1(\Omega)$. Hence, multiplying by a test function $\varphi$ and integrating, we obtain the following variational (or weak) formulation of the split formulation (including the nonlinear solver for the first equation) at each time step:

        \begin{eqnarray*}
   &\mbox{ Find}& \delta u^n_l \in H^1(\Omega) \mbox{ s.t. }
   \left( F'(u^n_l)\delta u^n_l, \varphi \right)_{\Omega}
  = -\left( F(u^n_l), \varphi \right)_{\Omega} \;\forall\varphi\in H^1(\Omega),
  \\
  &\mbox{ Find}& v^n \in H^1(\Omega) \mbox{ s.t. }
  \left( v^n, \varphi \right)_{\Omega}
  =
  \left( v^{n-1}, \varphi \right)_{\Omega}
  - k\theta\left( \nabla u^n, \nabla\varphi \right)_{\Omega}
  - k(1-\theta)\left( \nabla u^{n-1}, \nabla\varphi \right)_{\Omega}
  - k\left(\sin\left[ \theta u^n + (1-\theta) u^{n-1} \right],
          \varphi \right)_{\Omega} \;\forall\varphi\in H^1(\Omega).
 \end{eqnarray*}

Note that we have used integration by parts and the zero Neumann boundary conditions on all terms involving the Laplacian operator. Moreover, $F(\cdot)$ and $F'(\cdot)$ are as defined above, and $(\cdot,\cdot)_{\Omega}$ denotes the usual $L^2$ inner product over the domain $\Omega$, i.e. $(f,g)_{\Omega} = \int_\Omega fg \,\mathrm{d}x$. Finally, notice that the first equation is, in fact, the definition of an iterative procedure, so it is solved multiple times during each time step until a stopping criterion is met.

        Discretization of the weak formulation in space

Using the Finite Element Method, we discretize the variational formulation in space. To this end, let $V_h$ be a finite-dimensional $H^1(\Omega)$-conforming finite element space ( $\mathrm{dim}\, V_h = N < \infty$) with nodal basis $\{\varphi_1,\ldots,\varphi_N\}$. Now, we can expand all functions in the weak formulation (see above) in terms of the nodal basis. Henceforth, we shall denote by a capital letter the vector of coefficients (in the nodal basis) of a function denoted by the same letter in lower case; e.g., $u^n = \sum_{i=1}^N U^n_i \varphi_i$ where $U^n \in {R}^N$ and $u^n \in H^1(\Omega)$. Thus, the finite-dimensional version of the variational formulation requires that we solve the following matrix equations at each time step:

\begin{eqnarray*}
  F_h'(U^{n,l})\,\delta U^{n,l} &=& -F_h(U^{n,l}), \qquad
  U^{n,l+1} = U^{n,l} + \delta U^{n,l}, \\
  MV^n &=& MV^{n-1} - k\left[ \theta A U^n + (1-\theta) A U^{n-1}\right]
  - k\,S(u^n,u^{n-1}),
\end{eqnarray*}

where the Jacobian is

\begin{eqnarray*}
  F_h'(U^{n,l}) &=& M + k^2\theta^2 A
  + k^2\theta^2N(u^n_l,u^{n-1})
\end{eqnarray*}

Again, note that the first matrix equation above is, in fact, the definition of an iterative procedure, so it is solved multiple times until a stopping criterion is met. Moreover, $M$ is the mass matrix, i.e. $M_{ij} = \left( \varphi_i,\varphi_j \right)_{\Omega}$, $A$ is the Laplace matrix, i.e. $A_{ij} = \left( \nabla \varphi_i, \nabla \varphi_j \right)_{\Omega}$, $S$ is the nonlinear term in the equation that defines our auxiliary velocity variable, i.e. $S_j(f,g) = \left( \sin\left[ \theta f + (1-\theta) g\right], \varphi_j \right)_{\Omega}$, and $N$ is the nonlinear term in the Jacobian matrix of $F(\cdot)$, i.e. $N_{ij}(f,g) = \left( \cos\left[ \theta f + (1-\theta) g\right]\varphi_i, \varphi_j \right)_{\Omega}$.

        What solvers can we use for the first equation? Let's look at the matrix we have to invert:

        \[
  \left(F_h'\right)_{ij} =
  \int_\Omega \left(1+k^2\theta^2\alpha\right) \varphi_i\varphi_j \; dx
  + k^2 \theta^2 \int_\Omega \nabla\varphi_i\nabla\varphi_j \; dx,
 \]

for some $\alpha$ that depends on the present and previous solution. First, note that the matrix is symmetric. In addition, if the time step $k$ is small enough, i.e. if $k\theta<1$, then the matrix is also going to be positive definite. In the program below, this will always be the case, so we will use the Conjugate Gradient method together with the SSOR method as preconditioner. We should keep in mind, however, that this will fail if we happen to use a bigger time step. Fortunately, in that case the solver will just throw an exception indicating a failure to converge, rather than silently producing a wrong result. If that happens, then we can simply replace the CG method by something that can handle indefinite symmetric systems. The GMRES solver is typically the standard method for all "bad" linear systems, but it is also a slow one. Possibly better would be a solver that utilizes the symmetry, such as, for example, SymmLQ, which is also implemented in deal.II.
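A minimal sketch of such a CG solve with SSOR preconditioning (assuming a SparseMatrix<double> system_matrix and vectors named in the conventions of this program; the tolerance and relaxation parameter are illustrative) could look like this:

  SolverControl solver_control(1000, 1e-12 * system_rhs.l2_norm());
  SolverCG<Vector<double>> cg(solver_control);

  PreconditionSSOR<SparseMatrix<double>> preconditioner;
  preconditioner.initialize(system_matrix, 1.2);

  cg.solve(system_matrix, solution_update, system_rhs, preconditioner);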

This program uses a clever optimization over step-23 and step-24: If you read the above formulas closely, it becomes clear that the velocity $V$ only ever appears in products with the mass matrix. In step-23 and step-24, we were, therefore, a bit wasteful: in each time step, we would solve a linear system with the mass matrix, only to multiply the solution of that system by $M$ again in the next time step. This can, of course, be avoided, and we do so in this program.

        The test case

        There are a few analytical solutions for the sine-Gordon equation, both in 1D and 2D. In particular, the program as is computes the solution to a problem with a single kink-like solitary wave initial condition. This solution is given by Leibbrandt in Phys. Rev. Lett. 41(7), and is implemented in the ExactSolution class.

It should be noted that this closed-form solution, strictly speaking, only holds for the infinite-space initial-value problem (not the Neumann initial-boundary-value problem under consideration here). However, given that we impose zero Neumann boundary conditions, we expect that the solution to our initial-boundary-value problem would be close to the solution of the infinite-space initial-value problem, if reflections of waves off the boundaries of our domain do not occur. In practice, this is of course not the case, but we can at least proceed as if it were.

The constants $\vartheta$ and $\lambda$ in the 2D solution and $\vartheta$, $\phi$ and $\tau$ in the 3D solution are called the Bäcklund transformation parameters. They control such things as the orientation and steepness of the kink. For the purposes of testing the code against the exact solution, one should choose the parameters so that the kink is aligned with the grid.

        The solutions that we implement in the ExactSolution class are these:

        • In 1D:


          The entire algorithm for solving the problem is encapsulated in this class. As in previous example programs, the class is declared with a template parameter, which is the spatial dimension, so that we can solve the sine-Gordon equation in one, two or three spatial dimensions. For more on the dimension-independent class-encapsulation of the problem, the reader should consult step-3 and step-4.

          Compared to step-23 and step-24, there isn't anything newsworthy in the general structure of the program (though there is of course in the inner workings of the various functions!). The most notable difference is the presence of the two new functions compute_nl_term and compute_nl_matrix that compute the nonlinear contributions to the system matrix and right-hand side of the first equation, as discussed in the Introduction. In addition, we have to have a vector solution_update that contains the nonlinear update to the solution vector in each Newton step.

          As also mentioned in the introduction, we do not store the velocity variable in this program, but the mass matrix times the velocity. This is done in the M_x_velocity variable (the "x" is intended to stand for "times").

Finally, the output_timestep_skip variable stores the number of time steps to be taken each time before graphical output is to be generated. This is of importance when using fine meshes (and consequently small time steps) where we would run lots of time steps and create lots of output files of solutions that look almost the same in subsequent files. This only clogs up our visualization procedures and we should avoid creating more output than we are really interested in. Therefore, if this variable is set to a value $n$ bigger than one, output is generated only every $n$th time step.

            template <int dim>
            class SineGordonProblem
            {

          Let's move on to the implementation of the main class, as it implements the algorithm outlined in the introduction.

          SineGordonProblem::SineGordonProblem

          This is the constructor of the SineGordonProblem class. It specifies the desired polynomial degree of the finite elements, associates a DoFHandler to the triangulation object (just as in the example programs step-3 and step-4), initializes the current or initial time, the final time, the time step size, and the value of $\theta$ for the time stepping scheme. Since the solutions we compute here are time-periodic, the actual value of the start-time doesn't matter, and we choose it so that we start at an interesting time.

Note that if we were to choose the explicit Euler time stepping scheme ( $\theta = 0$), then we must pick a time step $k \le h$, otherwise the scheme is not stable and oscillations might arise in the solution. The Crank-Nicolson scheme ( $\theta = \frac{1}{2}$) and the implicit Euler scheme ( $\theta=1$) do not suffer from this deficiency, since they are unconditionally stable. However, even then the time step should be chosen to be on the order of $h$ in order to obtain a good solution. Since we know that our mesh results from the uniform subdivision of a rectangle, we can compute that time step easily; if we had a different domain, the technique in step-24 using GridTools::minimal_cell_diameter would work as well.

            template <int dim>
            SineGordonProblem<dim>::SineGordonProblem()
            : fe(1)

          SineGordonProblem::make_grid_and_dofs

This function creates a rectangular grid in dim dimensions and refines it several times. Also, all matrix and vector members of the SineGordonProblem class are initialized to their appropriate sizes once the degrees of freedom have been assembled. Like step-24, we use MatrixCreator functions to generate a mass matrix $M$ and a Laplace matrix $A$ and store them in the appropriate variables for the remainder of the program's life.
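In a sketch, those two calls look like the following (assuming member variables mass_matrix and laplace_matrix of type SparseMatrix<double>, and a quadrature formula of sufficient order):

  MatrixCreator::create_mass_matrix(dof_handler,
                                    QGauss<dim>(fe.degree + 1),
                                    mass_matrix);
  MatrixCreator::create_laplace_matrix(dof_handler,
                                       QGauss<dim>(fe.degree + 1),
                                       laplace_matrix);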

            template <int dim>
            void SineGordonProblem<dim>::make_grid_and_dofs()
            {
            << "advancing to t = " << time << '.' << std::endl;
           

          At the beginning of each time step we must solve the nonlinear equation in the split formulation via Newton's method — i.e. solve for $\delta U^{n,l}$ then compute $U^{n,l+1}$ and so on. The stopping criterion for this nonlinear iteration is that $\|F_h(U^{n,l})\|_2 \le 10^{-6} \|F_h(U^{n,0})\|_2$. Consequently, we need to record the norm of the residual in the first iteration.

At the end of each iteration, we output to the console how many linear solver iterations it took us. When the loop below is done, we have (an approximation of) $U^n$.

            double initial_rhs_norm = 0.;
            bool first_iteration = true;
            do
           
            std::cout << " CG iterations per nonlinear step." << std::endl;
           
Upon obtaining the solution to the first equation of the problem at $t=t_n$, we must update the auxiliary velocity variable $V^n$. However, we do not compute and store $V^n$ since it is not a quantity we use directly in the problem. Hence, for simplicity, we update $MV^n$ directly:

            Vector<double> tmp_vector(solution.size());
            laplace_matrix.vmult(tmp_vector, solution);
            M_x_velocity.add(-time_step * theta, tmp_vector);
            return 0;
            }

          Results

The explicit Euler time stepping scheme ( $\theta=0$) performs adequately for the problems we wish to solve. Unfortunately, a rather small time step has to be chosen due to stability issues: $k\sim h/10$ appears to work for most of the simulations we performed. On the other hand, the Crank-Nicolson scheme ( $\theta=\frac{1}{2}$) is unconditionally stable, and (at least for the case of the 1D breather) we can pick the time step to be as large as $25h$ without any ill effects on the solution. The implicit Euler scheme ( $\theta=1$) is "exponentially damped," so it is not a good choice for solving the sine-Gordon equation, which is conservative. However, some of the damped schemes in the continuum that is offered by the $\theta$-method were useful for eliminating spurious oscillations due to boundary effects.

In the simulations below, we solve the sine-Gordon equation on the interval $\Omega = [-10,10]$ in 1D and on the square $\Omega = [-10,10]\times [-10,10]$ in 2D. In each case, the respective grid is refined uniformly 6 times, i.e. $h\sim 2^{-6}$.

\[
 u_{\mathrm{breather}}(x,t) = -4\arctan \left(\frac{m}{\sqrt{1-m^2}} \frac{\sin\left(\sqrt{1-m^2}t +c_2\right)}{\cosh(mx+c_1)} \right),
\]

where $c_1$, $c_2$ and $m<1$ are constants. In the simulation below, we have chosen $c_1=0$, $c_2=0$, $m=0.5$. Moreover, it is known that the period of oscillation of the breather is $2\pi\sqrt{1-m^2}$, hence we have chosen $t_0=-5.4414$ and $t_f=2.7207$ so that we can observe three oscillations of the solution. Then, taking $u_0(x) = u_{\mathrm{breather}}(x,t_0)$, $\theta=0$ and $k=h/10$, the program computed the following solution.

          Animation of the 1D stationary breather.

Though not shown how to do this in the program, another way to visualize the (1+1)-d solution is to use output generated by the DataOutStack class; it allows us to "stack" the solutions of individual time steps, so that we get 2D space-time graphs from 1D time-dependent solutions. This produces the space-time plot below instead of the animation above.

          A space-time plot of the 1D stationary breather.
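While not part of this tutorial program, a minimal sketch of the DataOutStack pattern just described might look as follows (variable names are illustrative):

  DataOutStack<dim> data_out_stack;
  data_out_stack.declare_data_vector("solution",
                                     DataOutStack<dim>::dof_vector);

  // Then, once per time step:
  data_out_stack.new_parameter_value(time, time_step);
  data_out_stack.attach_dof_handler(dof_handler);
  data_out_stack.add_data_vector(solution, "solution");
  data_out_stack.build_patches();
  data_out_stack.finish_parameter_value();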

/usr/share/doc/packages/dealii/doxygen/deal.II/step_26.html differs (HTML document, UTF-8 Unicode text, with very long lines)

Here, $k_n=t_n-t_{n-1}$ is the time step size. The theta-scheme generalizes the explicit Euler ( $\theta=0$), implicit Euler ( $\theta=1$) and Crank-Nicolson ( $\theta=\frac 12$) time discretizations. Since the latter has the highest convergence order, we will choose $\theta=\frac 12$ in the program below, but make it so that playing with this parameter remains simple. (If you are interested in playing with higher order methods, take a look at step-52.)

Given this time discretization, space discretization happens as it always does, by multiplying with test functions, integrating by parts, and then restricting everything to a finite dimensional subspace. This yields the following set of fully discrete equations after multiplying through with $k_n$:

\begin{align*}
  M U^n-MU^{n-1}
  +
  k_n \left[
  \theta A U^n + (1-\theta) A U^{n-1}
  \right]
  =
  k_n
  \left[
  (1-\theta)F^{n-1} + \theta F^n
  \right],
\end{align*}

where $M$ is the mass matrix and $A$ is the stiffness matrix that results from discretizing the Laplacian. Bringing all known quantities to the right hand side yields the linear system we have to solve in every step:

\begin{align*}
  (M
  +
  k_n \theta A) U^n
  =
  MU^{n-1}
  -
  k_n (1-\theta) A U^{n-1}
  +
  k_n
  \left[
  (1-\theta)F^{n-1} + \theta F^n
  \right].
\end{align*}

• Time step size and minimal mesh size: For stationary problems, the general approach is "make the mesh as fine as it is necessary". For problems with singularities, this often leads to situations where we get many levels of refinement into corners or along interfaces. The very first tutorial to use adaptive meshes, step-6, is a case in point already.

However, for time dependent problems, we typically need to choose the time step related to the mesh size. For explicit time discretizations, this is obvious, since we need to respect a CFL condition that ties the time step size to the smallest mesh size. For implicit time discretizations, no such hard restriction exists, but in practice we still want to make the time step smaller if we make the mesh size smaller since we typically have error estimates of the form $\|e\| \le {\cal O}(k^p + h^q)$ where $p,q$ are the convergence orders of the time and space discretization, respectively. We can only make the error small if we decrease both terms. Ideally, an estimate like this would suggest to choose $k \propto h^{q/p}$. Because, at least for problems with non-smooth solutions, the error is typically localized in the cells with the smallest mesh size, we have to indeed choose $k \propto h_{\text{min}}^{q/p}$, using the smallest mesh size.

The consequence is that refining the mesh further in one place implies not only the moderate additional effort of increasing the number of degrees of freedom slightly, but also the much larger effort of having to solve the global linear system more often because of the smaller time step.

          In practice, one typically deals with this by acknowledging that we can not make the time step arbitrarily small, and consequently can not make the local mesh size arbitrarily small. Rather, we set a maximal level of refinement and when we flag cells for refinement, we simply do not refine those cells whose children would exceed this maximal level of refinement.

          There is a similar problem in that we will choose a right hand side that will switch on in different parts of the domain at different times. To avoid being caught flat footed with too coarse a mesh in areas where we suddenly need a finer mesh, we will also enforce in our program a minimal mesh refinement level.
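A sketch of how such limits can be enforced before executing refinement (max_grid_level and min_grid_level are illustrative names for the two bounds):

  if (triangulation.n_levels() > max_grid_level)
    for (const auto &cell :
         triangulation.active_cell_iterators_on_level(max_grid_level))
      cell->clear_refine_flag();
  for (const auto &cell :
       triangulation.active_cell_iterators_on_level(min_grid_level))
    cell->clear_coarsen_flag();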

Writing the discrete solution at time step $n$ in terms of the shape functions,

\begin{align*}
  u_h^n(\mathbf x) = \sum_j U^n_j \varphi_j(\mathbf x),
\end{align*}

multiply with test functions $\varphi_i(\mathbf x)$ and integrate by parts where necessary. In a process as outlined above, this would yield

          \begin{align*}
     \sum_j
     (M
    +
    k_n \theta A)_{ij} U^n_j
    =
    \sum_j
    \left[
    M_{ij} - k_n (1-\theta) A_{ij}
    \right] U^{n-1}_j
    +
    k_n
    \left[
    (1-\theta) F^{n-1}_i + \theta F^n_i
     \right].
   \end{align*}

Now imagine that we have changed the mesh between time steps $n-1$ and $n$. Then the problem is that the basis functions we use for $u_h^n$ and $u^{n-1}$ are different! This pertains to the terms on the right hand side, the first of which we could more clearly write as (the second follows the same pattern)

          \begin{align*}
     (\varphi_i, u_h^{n-1})
     =
    \sum_{j=1}^{N_{n-1}} (\varphi_i^n, \varphi_j^{n-1}) U^{n-1}_j,
    \qquad
     i=1\ldots N_n.
   \end{align*}

If the meshes used in these two time steps are the same, then $(\varphi_i^n, \varphi_j^{n-1})$ forms a square mass matrix $M_{ij}$. However, if the meshes are not the same, then in general the matrix is rectangular. Worse, it is difficult to even compute these integrals because if we loop over the cells of the mesh at time step $n$, then we need to evaluate $\varphi_j^{n-1}$ at the quadrature points of these cells, but they do not necessarily correspond to the cells of the mesh at time step $n-1$ and $\varphi_j^{n-1}$ is not defined via these cells; the same of course applies if we wanted to compute the integrals via integration on the cells of mesh $n-1$.

          In any case, what we have to face is a situation where we need to integrate shape functions defined on two different meshes. This can be done, and is in fact demonstrated in step-28, but the process is at best described by the word "awkward".

          In practice, one does not typically want to do this. Rather, we avoid the whole situation by interpolating the solution from the old to the new mesh every time we adapt the mesh. In other words, rather than solving the equations above, we instead solve the problem

          \begin{align*}
    \sum_j
    (M
    +
    k_n \theta A)_{ij} U^n_j
    =
    \sum_j
    \left[
    M_{ij} - k_n (1-\theta) A_{ij}
    \right] \left(I_h^n U^{n-1}\right)_j
    +
    k_n
    \left[
    (1-\theta) F^{n-1}_i + \theta F^n_i
     \right],
   \end{align*}

where $I_h^n$ is the interpolation operator onto the finite element space used in time step $n$. This is not the optimal approach since it introduces an additional error besides time and space discretization, but it is a pragmatic one that makes it feasible to do time-adapting meshes.
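In deal.II, this interpolation is what the SolutionTransfer class provides. A minimal sketch of the usual calling sequence (variable names follow the conventions of the tutorial programs) is:

  SolutionTransfer<dim> solution_trans(dof_handler);
  Vector<double> previous_solution = solution;

  triangulation.prepare_coarsening_and_refinement();
  solution_trans.prepare_for_coarsening_and_refinement(previous_solution);
  triangulation.execute_coarsening_and_refinement();

  dof_handler.distribute_dofs(fe);
  solution.reinit(dof_handler.n_dofs());
  solution_trans.interpolate(previous_solution, solution);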

        What could possibly go wrong? Verifying whether the code is correct

        There are a number of things one can typically get wrong when implementing a finite element code. In particular, for time dependent problems, the following are common sources of bugs:

• The time integration, for example by getting the coefficients in front of the terms involving the current and previous time steps wrong (e.g., mixing up a factor $\theta$ for $1-\theta$).
• Handling the right hand side, for example forgetting a factor of $k_n$ or $\theta$.
• Mishandling the boundary values, again for example forgetting a factor of $k_n$ or $\theta$, or forgetting to apply nonzero boundary values not only to the right hand side but also to the system matrix.

        A less common problem is getting the initial conditions wrong because one can typically see that it is wrong by just outputting the first time step. In any case, in order to verify the correctness of the code, it is helpful to have a testing protocol that allows us to verify each of these components separately. This means:

        • Testing the code with nonzero initial conditions but zero right hand side and boundary values and verifying that the time evolution is correct.

In other words, in every period of length $\tau$, the right hand side first flashes on in domain 1, then off completely, then on in domain 2, then off completely again. This pattern is probably best observed via the little animation of the solution shown in the results section.

If you interpret the heat equation as finding the spatially and temporally variable temperature distribution of a conducting solid, then the test case above corresponds to an L-shaped body where we keep the boundary at zero temperature, and heat alternatingly in two parts of the domain. While heating is in effect, the temperature rises in these places, after which it diffuses and diminishes again. The point of these initial conditions is that they provide us with a solution that has singularities both in time (when sources switch on and off) as well as in space (at the reentrant corner as well as at the edges and corners of the regions where the source acts).

          The commented program

          The program starts with the usual include files, all of which you should have seen before by now:

            system_rhs.add(-(1 - theta) * time_step, tmp);
           

The second piece is to compute the contributions of the source terms. This corresponds to the term $k_n \left[ (1-\theta)F^{n-1} + \theta F^n \right]$. The following code calls VectorTools::create_right_hand_side to compute the vectors $F$, where we set the time of the right hand side (source) function before we evaluate it. The result of this all ends up in the forcing_terms variable:

            RightHandSide<dim> rhs_function;
            rhs_function.set_time(time);
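  // A sketch of how the assembly call might continue (tmp and
  // forcing_terms are assumed to be Vector<double> objects of the
  // correct size):
  VectorTools::create_right_hand_side(dof_handler,
                                      QGauss<dim>(fe.degree + 1),
                                      rhs_function,
                                      tmp);
  forcing_terms = tmp;
  forcing_terms *= time_step * theta;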

          There are two factors at play. First, there are some islands where cells have been refined but that are surrounded by non-refined cells (and there are probably also a few occasional coarsened islands). These are not terrible, as they most of the time do not affect the approximation quality of the mesh, but they also don't help because so many of their additional degrees of freedom are in fact constrained by hanging node constraints. That said, this is easy to fix: the Triangulation class takes an argument to its constructor indicating a level of "mesh smoothing". Passing one of many possible flags, this instructs the triangulation to refine some additional cells, or not to refine some cells, so that the resulting mesh does not have these artifacts.
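For example (a sketch; which smoothing flag is appropriate depends on the artifacts one wants to suppress):

  Triangulation<dim> triangulation(Triangulation<dim>::maximum_smoothing);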

          The second problem is more severe: the mesh appears to lag the solution. The underlying reason is that we only adapt the mesh once every fifth time step, and only allow for a single refinement in these cases. Whenever a source switches on, the solution had been very smooth in this area before and the mesh was consequently rather coarse. This implies that the next time step when we refine the mesh, we will get one refinement level more in this area, and five time steps later another level, etc. But this is not enough: first, we should refine immediately when a source switches on (after all, in the current context we at least know what the right hand side is), and we should allow for more than one refinement level. Of course, all of this can be done using deal.II, it just requires a bit of algorithmic thinking in how to make this work!

          Positivity preservation

To increase the accuracy and resolution of your simulation in time, one typically decreases the time step size $k_n$. If you start playing around with the time step in this particular example, you will notice that the solution becomes partly negative if $k_n$ is below a certain threshold. This is not what we would expect to happen (in nature).

          To get an idea of this behavior mathematically, let us consider a general, fully discrete problem:

          \begin{align*}
   A u^{n} = B u^{n-1}.
 \end{align*}

The general form of the $i$th equation then reads:

          \begin{align*}
   a_{ii} u^{n}_i &= b_{ii} u^{n-1}_i +
   \sum\limits_{j \in S_i} \left( b_{ij} u^{n-1}_j - a_{ij} u^{n}_j \right),
 \end{align*}

where $S_i$ is the set of degrees of freedom that DoF $i$ couples with (i.e., for which either the matrix $A$ or matrix $B$ has a nonzero entry at position $(i,j)$). If all coefficients fulfill the following conditions:

          \begin{align*}
   a_{ii} &> 0, & b_{ii} &\geq 0, & a_{ij} &\leq 0, & b_{ij} &\geq 0,
   &
   \forall j &\in S_i,
 \end{align*}

all solutions $u^{n}$ keep their sign from the previous ones $u^{n-1}$, and consequently from the initial values $u^0$. See e.g. Kuzmin, Hämäläinen for more information on positivity preservation.

Depending on the PDE to solve and the time integration scheme used, one is able to deduce conditions for the time step $k_n$. For the heat equation with the Crank-Nicolson scheme, Schatz et al. have translated it to the following ones:

          \begin{align*}
   (1 - \theta) k a_{ii} &\leq m_{ii},\qquad \forall i,
   &
/usr/share/doc/packages/dealii/doxygen/deal.II/step_27.html differs (HTML document, UTF-8 Unicode text, with very long lines)

          Introduction

          This tutorial program attempts to show how to use $hp$-finite element methods with deal.II. It solves the Laplace equation and so builds only on the first few tutorial programs, in particular on step-4 for dimension independent programming and step-6 for adaptive mesh refinement.

The $hp$-finite element method was proposed in the early 1980s by Babuška and Guo as an alternative to either (i) mesh refinement (i.e., decreasing the mesh parameter $h$ in a finite element computation) or (ii) increasing the polynomial degree $p$ used for shape functions. It is based on the observation that increasing the polynomial degree of the shape functions reduces the approximation error if the solution is sufficiently smooth. On the other hand, it is well known that even for the generally well-behaved class of elliptic problems, higher degrees of regularity can not be guaranteed in the vicinity of boundaries, corners, or where coefficients are discontinuous; consequently, the approximation can not be improved in these areas by increasing the polynomial degree $p$ but only by refining the mesh, i.e., by reducing the mesh size $h$. These differing means to reduce the error have led to the notion of $hp$-finite elements, where the approximating finite element spaces are adapted to have a high polynomial degree $p$ wherever the solution is sufficiently smooth, while the mesh width $h$ is reduced at places wherever the solution lacks regularity. It was already realized in the first papers on this method that $hp$-finite elements can be a powerful tool that can guarantee that the error is reduced not only with some negative power of the number of degrees of freedom, but in fact exponentially.

          In order to implement this method, we need several things above and beyond what a usual finite element program needs, and in particular above what we have introduced in the tutorial programs leading up to step-6. In particular, we will have to discuss the following aspects:

          • Instead of using the same finite element on all cells, we now will want a collection of finite element objects, and associate each cell with one of these objects in this collection.

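A minimal sketch of the collection of finite element objects mentioned in the first item above (assuming the usual FE_Q Lagrange elements; the degree range is illustrative):

  hp::FECollection<dim> fe_collection;
  for (unsigned int degree = 2; degree <= 7; ++degree)
    fe_collection.push_back(FE_Q<dim>(degree));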

            One of the central pieces of the adaptive finite element method is that we inspect the computed solution (a posteriori) with an indicator that tells us which are the cells where the error is largest, and then refine them. In many of the other tutorial programs, we use the KellyErrorEstimator class to get an indication of the size of the error on a cell, although we also discuss more complicated strategies in some programs, most importantly in step-14.

            In any case, as long as the decision is only "refine this cell" or "do not refine this cell", the actual refinement step is not particularly challenging. However, here we have a code that is capable of hp-refinement, i.e., we suddenly have two choices whenever we detect that the error on a certain cell is too large for our liking: we can refine the cell by splitting it into several smaller ones, or we can increase the polynomial degree of the shape functions used on it. How do we know which is the more promising strategy? Answering this question is the central problem in $hp$-finite element research at the time of this writing.


            In short, the question does not appear to be settled in the literature at this time. There are a number of more or less complicated schemes that address it, but there is nothing like the KellyErrorEstimator that is universally accepted as a good, even if not optimal, indicator of the error. Most proposals use the fact that it is beneficial to increase the polynomial degree whenever the solution is locally smooth whereas it is better to refine the mesh wherever it is rough. However, the questions of how to determine the local smoothness of the solution as well as the decision when a solution is smooth enough to allow for an increase in $p$ are certainly big and important ones.


            In the following, we propose a simple estimator of the local smoothness of a solution. As we will see in the results section, this estimator has flaws, in particular as far as cells with local hanging nodes are concerned. We therefore do not intend to present the following ideas as a complete solution to the problem. Rather, it is intended as an idea to approach it that merits further research and investigation. In other words, we do not intend to enter a sophisticated proposal into the fray about answers to the general question. However, to demonstrate our approach to $hp$-finite elements, we need a simple indicator that does generate some useful information that is able to drive the simple calculations this tutorial program will perform.

            The idea


            Our approach here is simple: for a function $u({\bf x})$ to be in the Sobolev space $H^s(K)$ on a cell $K$, it has to satisfy the condition


\[
    \int_K |\nabla^s u({\bf x})|^2 \; d{\bf x} < \infty.
 \]

Put differently: the higher regularity $s$ we want, the faster the Fourier coefficients have to go to zero. If you wonder where the additional exponent $\frac{d-1}2$ comes from: we would like to make use of the fact that $\sum_l a_l < \infty$ if the sequence $a_l = {\cal O}(l^{-1-\epsilon})$ for any $\epsilon>0$. The problem is that we here have a summation not only over a single variable, but over all the integer multiples of $2\pi$ that are located inside the $d$-dimensional sphere, because we have vector components $k_x, k_y, \ldots$. In the same way as we prove that the sequence $a_l$ above converges by replacing the sum by an integral over the entire line, we can replace our $d$-dimensional sum by an integral over $d$-dimensional space. Now we have to note that between distance $|{\bf k}|$ and $|{\bf k}|+d|{\bf k}|$, there are, up to a constant, $|{\bf k}|^{d-1}$ modes, in much the same way as we can transform the volume element $dx\;dy$ into $2\pi r\; dr$. Consequently, it is no longer $|{\bf k}|^{2s}|\hat U_{\bf k}|^2$ that has to decay as ${\cal O}(|{\bf k}|^{-1-\epsilon})$, but it is in fact $|{\bf k}|^{2s}|\hat U_{\bf k}|^2 |{\bf k}|^{d-1}$. A comparison of exponents yields the result.

            We can turn this around: Assume we are given a function $\hat u$ of unknown smoothness. Let us compute its Fourier coefficients $\hat U_{\bf k}$ and see how fast they decay. If they decay as

\[
    |\hat U_{\bf k}| = {\cal O}(|{\bf k}|^{-\mu}),
 \]

            then consequently the function we had here was in $H^{\mu-d/2}$.

            What we have to do


So what do we have to do to estimate the local smoothness of $u({\bf x})$ on a cell $K$? Clearly, the first step is to compute the Fourier coefficients of our solution. Fourier series being infinite series, we simplify our task by only computing the first few terms of the series, such that $|{\bf k}|\le 2\pi N$ with a cut-off $N$. Let us parenthetically remark that we want to choose $N$ large enough so that we capture at least the variation of those shape functions that vary the most. On the other hand, we should not choose $N$ too large: clearly, a finite element function, being a polynomial, is in $C^\infty$ on any given cell, so the coefficients will have to decay exponentially from some point on; since we want to estimate the smoothness of the function this polynomial approximates, not of the polynomial itself, we need to choose a reasonable cutoff for $N$. Either way, computing this series is not particularly hard: from the definition


\[
    \hat U_{\bf k}
    = \int_{\hat K} e^{i {\bf k}\cdot \hat{\bf x}} \hat u(\hat{\bf x}) d\hat{\bf x}
    = \sum_i \left[ \int_{\hat K} e^{i {\bf k}\cdot \hat{\bf x}} \hat \varphi_i(\hat{\bf x})
    d\hat{\bf x} \right] u_i,
 \]


            where $u_i$ is the value of the $i$th degree of freedom on this cell. In other words, we can write it as a matrix-vector product


\[
    \hat U_{\bf k}
    = {\cal F}_{{\bf k},j} u_j,
 \]

with the matrix

\[
    {\cal F}_{{\bf k},j}
    =
    \int_{\hat K} e^{i {\bf k}\cdot \hat{\bf x}} \hat \varphi_j(\hat{\bf x}) d\hat{\bf x}.
 \]


This matrix is easily computed for a given number of shape functions $\varphi_j$ and Fourier modes $N$. Consequently, finding the coefficients $\hat U_{\bf k}$ is a rather trivial job. To simplify our life even further, we will use the FESeries::Fourier class, which does exactly this.


The next task is to estimate how fast these coefficients decay with $|{\bf k}|$. The problem is that, of course, we have only finitely many of these coefficients in the first place. In other words, the best we can do is to fit a function $\alpha |{\bf k}|^{-\mu}$ to our data points $\hat U_{\bf k}$, for example by determining $\alpha,\mu$ via a least-squares procedure:

\[
    \min_{\alpha,\mu}
    \frac 12 \sum_{{\bf k}, |{\bf k}|\le N}
    \left( |\hat U_{\bf k}| - \alpha |{\bf k}|^{-\mu}\right)^2.
 \]

This is a nonlinear problem in $\alpha$ and $\mu$; it turns into a linear least-squares problem if we instead fit the logarithms and minimize

\[
    \min_{\beta,\mu}
    Q(\beta,\mu) =
    \frac 12 \sum_{{\bf k}, |{\bf k}|\le N}
    \left( \ln |\hat U_{\bf k}| - \beta + \mu \ln |{\bf k}|\right)^2,
 \]

where $\beta=\ln \alpha$. This is now a problem for which the optimality conditions $\frac{\partial Q}{\partial\beta}=0, \frac{\partial Q}{\partial\mu}=0$ are linear in $\beta,\mu$. We can write these conditions as follows:

\[
    \left(\begin{array}{cc}
    \sum_{{\bf k}, |{\bf k}|\le N} 1 &
    \sum_{{\bf k}, |{\bf k}|\le N} \ln |{\bf k}|
    \\
    \sum_{{\bf k}, |{\bf k}|\le N} \ln |{\bf k}| &
    \sum_{{\bf k}, |{\bf k}|\le N} (\ln |{\bf k}|)^2
    \end{array}\right)
    \left(\begin{array}{c}
    \beta \\ -\mu
    \end{array}\right)
    =
    \left(\begin{array}{c}
    \sum_{{\bf k}, |{\bf k}|\le N} \ln |\hat U_{{\bf k}}|
    \\
    \sum_{{\bf k}, |{\bf k}|\le N} \ln |\hat U_{{\bf k}}| \ln |{\bf k}|
    \end{array}\right),
 \]

and solving this $2\times 2$ system for the exponent yields

\[
    \mu =
    \frac{
    \sum_{{\bf k}, |{\bf k}|\le N} \ln |{\bf k}|
    \sum_{{\bf k}, |{\bf k}|\le N} \ln |\hat U_{{\bf k}}|
    -
    \left(\sum_{{\bf k}, |{\bf k}|\le N} 1\right)
    \sum_{{\bf k}, |{\bf k}|\le N} \ln |{\bf k}| \ln |\hat U_{{\bf k}}|
    }{
    \left(\sum_{{\bf k}, |{\bf k}|\le N} 1\right)
    \sum_{{\bf k}, |{\bf k}|\le N} (\ln |{\bf k}|)^2
    -
    \left(\sum_{{\bf k}, |{\bf k}|\le N} \ln |{\bf k}|\right)^2
    }.
 \]


This is nothing else but a linear regression fit, and to do it we will use FESeries::linear_regression(). While we are not particularly interested in the actual value of $\beta$, the formula above gives us a means to calculate the value of the exponent $\mu$ that we can then use to determine that $\hat u(\hat{\bf x})$ is in $H^s(\hat K)$ with $s=\mu-\frac d2$.


The steps outlined above are applicable to many different scenarios, which motivated the introduction of a generic function SmoothnessEstimator::Fourier::coefficient_decay() in deal.II that combines all the tasks described in this section in one simple function call. We will use it in the implementation of this program.
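As a minimal sketch of how these pieces fit together in code (the names `fe_collection`, `dof_handler`, `solution`, and `triangulation` follow the usual tutorial conventions and are assumptions here, as is the use of the convenience function default_fe_series()):

  // One smoothness value per cell; coefficient_decay() computes the
  // Fourier coefficients on each cell and performs the least-squares
  // fit for mu described above.
  Vector<float> smoothness_indicators(triangulation.n_active_cells());

  FESeries::Fourier<dim> fourier =
    SmoothnessEstimator::Fourier::default_fe_series(fe_collection);

  SmoothnessEstimator::Fourier::coefficient_decay(fourier,
                                                  dof_handler,
                                                  solution,
                                                  smoothness_indicators);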

            Compensating for anisotropy

In the formulas above, we have derived the Fourier coefficients $\hat U_{{\bf k}}$. Because ${\bf k}$ is a vector, we will get a number of Fourier coefficients $\hat U_{{\bf k}}$ for the same absolute value $|{\bf k}|$, corresponding to the Fourier transform in different directions. If we now consider a function like $|x|y^2$ then we will find lots of large Fourier coefficients in $x$-direction because the function is non-smooth in this direction, but fast-decaying Fourier coefficients in $y$-direction because the function is smooth there. The question that arises is this: if we simply fit our polynomial decay $\alpha |{\bf k}|^{-\mu}$ to all Fourier coefficients, we will fit it to a smoothness averaged in all spatial directions. Is this what we want? Or would it be better to only consider the largest coefficient $\hat U_{{\bf k}}$ for all ${\bf k}$ with the same magnitude, essentially trying to determine the smoothness of the solution in that spatial direction in which the solution appears to be roughest?

            One can probably argue for either case. The issue would be of more interest if deal.II had the ability to use anisotropic finite elements, i.e., ones that use different polynomial degrees in different spatial directions, as they would be able to exploit the directionally variable smoothness much better. Alas, this capability does not exist at the time of writing this tutorial program.

Either way, because we only have isotropic finite element classes, we adopt the viewpoint that we should tailor the polynomial degree to the lowest amount of regularity, in order to keep numerical efforts low. Consequently, instead of using the formula

\[
    \mu =
    \frac{
    \sum_{{\bf k}, |{\bf k}|\le N} \ln |{\bf k}|
    \sum_{{\bf k}, |{\bf k}|\le N} \ln |\hat U_{{\bf k}}|
    -
    \left(\sum_{{\bf k}, |{\bf k}|\le N} 1\right)
    \sum_{{\bf k}, |{\bf k}|\le N} \ln |{\bf k}| \ln |\hat U_{{\bf k}}|
    }{
    \left(\sum_{{\bf k}, |{\bf k}|\le N} 1\right)
    \sum_{{\bf k}, |{\bf k}|\le N} (\ln |{\bf k}|)^2
    -
    \left(\sum_{{\bf k}, |{\bf k}|\le N} \ln |{\bf k}|\right)^2
    }.
 \]


To calculate $\mu$ as shown above, we have to slightly modify all sums: instead of summing over all Fourier modes, we only sum over those for which the Fourier coefficient is the largest one among all $\hat U_{{\bf k}}$ with the same magnitude $|{\bf k}|$, i.e., all sums above have to be replaced by the following sums:


\[
   \sum_{{\bf k}, |{\bf k}|\le N}
   \longrightarrow
   \sum_{\substack{{\bf k}, |{\bf k}|\le N \\ |\hat U_{{\bf k}}| \ge |\hat U_{{\bf k}'}| \ \text{for all} \ {\bf k}' \ \text{with} \ |{\bf k}'|=|{\bf k}|}}
 \]

There is one other complication: so far we have computed everything on the reference cell $\hat K$, whereas we are interested in the smoothness of the solution on the real cell $K$; the transformation between the two rescales the Fourier frequencies. Recall that the least-squares fit above minimized the sum of squares of the terms

\[
    \ln |\hat U_{{\bf k}}| - \beta + \mu \ln |{\bf k}|.
 \]

To compensate for the transformation means not attempting to fit a decay $|{\bf k}|^{-\mu}$ with respect to the Fourier frequencies ${\bf k}$ on the unit cell, but to fit the coefficients $\hat U_{{\bf k}}$ computed on the reference cell to the Fourier frequencies on the real cell $|{\bf k}|h$, where $h$ is the norm of the transformation operator (i.e., something like the diameter of the cell). In other words, we would have to minimize the sum of squares of the terms

\[
    \ln |\hat U_{{\bf k}}| - \beta + \mu \ln (|{\bf k}|h).
 \]

Expanding the logarithm, $\mu \ln(|{\bf k}|h) = \mu \ln |{\bf k}| + \mu \ln h$, so this is the same as minimizing the sum of squares of the terms

\[
    \ln |\hat U_{{\bf k}}| - (\beta - \mu \ln h) + \mu \ln (|{\bf k}|).
 \]


            In other words, this and the original least squares problem will produce the same best-fit exponent $\mu$, though the offset will in one case be $\beta$ and in the other $\beta-\mu \ln h$. However, since we are not interested in the offset at all but only in the exponent, it doesn't matter whether we scale Fourier frequencies in order to account for mesh size effects or not, the estimated smoothness exponent will be the same in either case.


            Complications with linear systems for hp-discretizations

            Creating the sparsity pattern

One of the problems with $hp$-methods is that the high polynomial degree of shape functions, together with the large number of constrained degrees of freedom, leads to matrices with large numbers of nonzero entries in some rows. At the same time, there are areas where we use low polynomial degree and, consequently, matrix rows with relatively few nonzero entries. Consequently, allocating the sparsity pattern for these matrices is a challenge: we cannot simply assemble a SparsityPattern by starting with an estimate of the bandwidth without using a lot of extra memory.


            The early tutorial programs use first or second degree finite elements, so removing entries in the sparsity pattern corresponding to constrained degrees of freedom does not have a large impact on the overall number of zeros explicitly stored by the matrix. However, since as many as a third of the degrees of freedom may be constrained in an hp-discretization (and, with higher degree elements, these constraints can couple one DoF to as many as ten or twenty other DoFs), it is worthwhile to take these constraints into consideration since the resulting matrix will be much sparser (and, therefore, matrix-vector products or factorizations will be substantially faster too).
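A minimal sketch of this approach (assuming the usual member names `dof_handler`, `constraints`, and `sparsity_pattern`): first collect the couplings in a DynamicSparsityPattern, which can grow without a bandwidth estimate, and only then copy the result into the static SparsityPattern.

  DynamicSparsityPattern dsp(dof_handler.n_dofs(), dof_handler.n_dofs());
  DoFTools::make_sparsity_pattern(dof_handler,
                                  dsp,
                                  constraints,
                                  /*keep_constrained_dofs=*/false);
  sparsity_pattern.copy_from(dsp);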

            Eliminating constrained degrees of freedom

A second problem particular to $hp$-methods arises because we have so many constrained degrees of freedom: typically up to about one third of all degrees of freedom (in 3d) are constrained because they either belong to cells with hanging nodes or because they are on cells adjacent to cells with a higher or lower polynomial degree. This is, in fact, not much more than the fraction of constrained degrees of freedom in non-$hp$ mode, but the difference is that each constrained hanging node is constrained not only against the two adjacent degrees of freedom, but is constrained against many more degrees of freedom.


            It turns out that the strategy presented first in step-6 to eliminate the constraints while computing the element matrices and vectors with AffineConstraints::distribute_local_to_global is the most efficient approach also for this case. The alternative strategy to first build the matrix without constraints and then "condensing" away constrained degrees of freedom is considerably more expensive. It turns out that building the sparsity pattern by this inefficient algorithm requires at least ${\cal O}(N \log N)$ in the number of unknowns, whereas an ideal finite element program would of course only have algorithms that are linear in the number of unknowns. Timing the sparsity pattern creation as well as the matrix assembly shows that the algorithm presented in step-6 (and used in the code below) is indeed faster.


In our program, we will also treat the boundary conditions as (possibly inhomogeneous) constraints and eliminate the matrix rows and columns corresponding to those as well. All we have to do for this is to call the function that interpolates the Dirichlet boundary conditions already in the setup phase in order to tell the AffineConstraints object about them, and then do the transfer from local to global data on matrix and vector simultaneously. This is exactly what we've shown in step-6.
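As a minimal sketch (assuming the usual tutorial member names `dof_handler`, `constraints`, `system_matrix`, and `system_rhs`), the whole treatment of constraints then looks like this:

  // Hanging-node constraints and Dirichlet boundary values go into one
  // AffineConstraints object during setup:
  constraints.clear();
  DoFTools::make_hanging_node_constraints(dof_handler, constraints);
  VectorTools::interpolate_boundary_values(dof_handler,
                                           0,
                                           Functions::ZeroFunction<dim>(),
                                           constraints);
  constraints.close();

  // During assembly, local contributions are copied into the global
  // matrix and right hand side with constrained rows and columns
  // eliminated on the fly:
  constraints.distribute_local_to_global(
    cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);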

            The test case

The test case we will solve with this program is a re-take of the one we already looked at in step-14: we solve the Laplace equation

\[
   -\Delta u = f
 \]


            in 2d, with $f=(x+1)(y+1)$, and with zero Dirichlet boundary values for $u$. We do so on the domain $[-1,1]^2\backslash[-\frac 12,\frac 12]^2$, i.e., a square with a square hole in the middle.


            The difference to step-14 is of course that we use $hp$-finite elements for the solution. The test case is of interest because it has re-entrant corners in the corners of the hole, at which the solution has singularities. We therefore expect that the solution will be smooth in the interior of the domain, and rough in the vicinity of the singularities. The hope is that our refinement and smoothness indicators will be able to see this behavior and refine the mesh close to the singularities, while the polynomial degree is increased away from it. As we will see in the results section, this is indeed the case.

            The commented program

            Include files


          LaplaceProblem::solve


          The function solving the linear system is entirely unchanged from previous examples. We simply try to reduce the initial residual (which equals the $l_2$ norm of the right hand side) by a certain factor:


            template <int dim>
            void LaplaceProblem<dim>::solve()
            {
    // ... (solver body as in previous examples) ...
  }

          LaplaceProblem::postprocess


          After solving the linear system, we will want to postprocess the solution. Here, all we do is to estimate the error, estimate the local smoothness of the solution as described in the introduction, then write graphical output, and finally refine the mesh in both $h$ and $p$ according to the indicators computed before. We do all this in the same function because we want the estimated error and smoothness indicators not only for refinement, but also include them in the graphical output.


            template <int dim>
            void LaplaceProblem<dim>::postprocess(const unsigned int cycle)
            {
    // ... (error estimation, smoothness estimation, graphical output,
    //      and mesh refinement, as described above) ...
            }
           

          After this, we would like to actually refine the mesh, in both $h$ and $p$. The way we are going to do this is as follows: first, we use the estimated error to flag those cells for refinement that have the largest error. This is what we have always done:


  GridRefinement::refine_and_coarsen_fixed_number(triangulation,
                                                  estimated_error_per_cell,
                                                  0.3,
                                                  0.03);

          Next we would like to figure out which of the cells that have been flagged for refinement should actually have $p$ increased instead of $h$ decreased. The strategy we choose here is that we look at the smoothness indicators of those cells that are flagged for refinement, and increase $p$ for those with a smoothness larger than a certain relative threshold. In other words, for every cell for which (i) the refinement flag is set, (ii) the smoothness indicator is larger than the threshold, and (iii) we still have a finite element with a polynomial degree higher than the current one in the finite element collection, we will assign a future FE index that corresponds to a polynomial with degree one higher than it currently is. The following function is capable of doing exactly this. Absent any better strategies, we will set the threshold via interpolation between the minimal and maximal smoothness indicators on cells flagged for refinement. Since the corner singularities are strongly localized, we will favor $p$- over $h$-refinement quantitatively. We achieve this with a low threshold by setting a small interpolation factor of 0.2. In the same way, we deal with cells that are going to be coarsened and decrease their polynomial degree when their smoothness indicator is below the corresponding threshold determined on cells to be coarsened.


  hp::Refinement::p_adaptivity_from_relative_threshold(dof_handler,
                                                       smoothness_indicators,
                                                       0.2,
                                                       0.2);

          The above function only determines whether the polynomial degree will change via future FE indices, but does not manipulate the $h$-refinement flags. So for cells that are flagged for both refinement categories, we prefer $p$- over $h$-refinement. The following function call ensures that only one of $p$- or $h$-refinement is imposed, and not both at once.


           
  hp::Refinement::choose_p_over_h(dof_handler);

          The bigger question is, of course, how to avoid this problem. Possibilities include estimating the smoothness not on single cells, but cell assemblies or patches surrounding each cell. It may also be possible to find simple correction factors for each cell depending on the number of constrained degrees of freedom it has. In either case, there are ample opportunities for further research on finding good $hp$-refinement criteria. On the other hand, the main point of the current program was to demonstrate using the $hp$-technology in deal.II, which is unaffected by our use of a possible sub-optimal refinement criterion.

          Possibilities for extensions

          Different hp-decision strategies


          This tutorial demonstrates only one particular strategy to decide between $h$- and $p$-adaptation. In fact, there are many more ways to automatically decide on the adaptation type, of which a few are already implemented in deal.II:


            • Fourier coefficient decay: This is the strategy currently implemented in this tutorial. For more information on this strategy, see the general documentation of the SmoothnessEstimator::Fourier namespace.

• Legendre coefficient decay: This strategy is similar to the previous one, but expands the solution in Legendre polynomials rather than Fourier modes. For more information on this strategy, see the general documentation of the SmoothnessEstimator::Legendre namespace.
            • Refinement history: The last strategy is quite different from the other two. In theory, we know how the error will converge after changing the discretization of the function space. With $h$-refinement the solution converges algebraically as already pointed out in step-7. If the solution is sufficiently smooth, though, we expect that the solution will converge exponentially with increasing polynomial degree of the finite element. We can compare a proper prediction of the error with the actual error in the following step to see if our choice of adaptation type was justified.


              The transition to this strategy is a bit more complicated. For this, we need an initialization step with pure $h$- or $p$-refinement and we need to transfer the predicted errors over adapted meshes. The extensive documentation of the hp::Refinement::predict_error() function describes not only the theoretical details of this approach, but also presents a blueprint on how to implement this strategy in your code. For more information, see [melenk2001hp] .


              Note that with this particular function you cannot predict the error for the next time step in time-dependent problems. Therefore, this strategy cannot be applied to this type of problem without further ado. Alternatively, the following approach could be used, which works for all the other strategies as well: start each time step with a coarse mesh, keep refining until happy with the result, and only then move on to the next time step.


            Try implementing one of these strategies into this tutorial and observe the subtle changes to the results. You will notice that all strategies are capable of identifying the singularities near the reentrant corners and will perform $h$-refinement in these regions, while preferring $p$-refinement in the bulk domain. A detailed comparison of these strategies is presented in [fehling2020] .


            Parallel hp-adaptive finite elements

            All functionality presented in this tutorial already works for both sequential and parallel applications. It is possible without too much effort to change to either the parallel::shared::Triangulation or the parallel::distributed::Triangulation classes. If you feel eager to try it, we recommend reading step-18 for the former and step-40 for the latter case first for further background information on the topic, and then come back to this tutorial to try out your newly acquired skills.

            We go one step further in step-75: Here, we combine hp-adaptive and MatrixFree methods in combination with parallel::distributed::Triangulation objects.

/usr/share/doc/packages/dealii/doxygen/deal.II/step_28.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_28.html 2024-12-27 18:25:19.296945782 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_28.html 2024-12-27 18:25:19.300945810 +0000

            Introduction

            In this example, we intend to solve the multigroup diffusion approximation of the neutron transport equation. Essentially, the way to view this is as follows: In a nuclear reactor, neutrons are speeding around at different energies, get absorbed or scattered, or start a new fission event. If viewed at long enough length scales, the movement of neutrons can be considered a diffusion process.


            A mathematical description of this would group neutrons into energy bins, and consider the balance equations for the neutron fluxes in each of these bins, or energy groups. The scattering, absorption, and fission events would then be operators within the diffusion equation describing the neutron fluxes. Assume we have energy groups $g=1,\ldots,G$, where by convention we assume that the neutrons with the highest energy are in group 1 and those with the lowest energy in group $G$. Then the neutron flux of each group satisfies the following equations:


\begin{eqnarray*}
 \frac 1{v_g}\frac{\partial \phi_g(x,t)}{\partial t}
 &=&
 \nabla \cdot (D_g(x) \nabla \phi_g(x,t))
 -
 \Sigma_{r,g}(x)\phi_g(x,t)
 \\
 && \qquad
 +
 \chi_g \sum_{g'=1}^G \nu\Sigma_{f,g'}(x) \phi_{g'}(x,t)
 +
 \sum_{g' \ne g} \Sigma_{s,g'\to g}(x) \phi_{g'}(x,t)
 +
 s_{\mathrm{ext},g}(x,t).
 \end{eqnarray*}

Here, $v_g$ is the velocity of neutrons within group $g$, and the terms on the right hand side are, in order:

• Diffusion $\nabla \cdot (D_g(x) \nabla \phi_g(x,t))$, with the diffusion coefficient $D_g$.

• Absorption $\Sigma_{r,g}(x)\phi_g(x,t)$ (note the negative sign). The coefficient $\Sigma_{r,g}$ is called the removal cross section.

• Nuclear fission $\chi_g\sum_{g'=1}^G\nu\Sigma_{f,g'}(x)\phi_{g'}(x,t)$. The production of neutrons of energy $g$ is proportional to the flux of neutrons of energy $g'$ times the probability $\Sigma_{f,g'}$ that neutrons of energy $g'$ cause a fission event times the number $\nu$ of neutrons produced in each fission event times the probability that a neutron produced in this event has energy $g$. $\nu\Sigma_{f,g'}$ is called the fission cross section and $\chi_g$ the fission spectrum. We will denote the term $\chi_g\nu\Sigma_{f,g'}$ as the fission distribution cross section in the program.
          • Scattering $\sum_{g'\ne g}\Sigma_{s,g'\to g}(x)\phi_{g'}(x,t)$ of neutrons of energy $g'$ producing neutrons of energy $g$. $\Sigma_{s,g'\to g}$ is called the scattering cross section. The case of elastic, in-group scattering $g'=g$ exists, too, but we subsume this into the removal cross section. The case $g'<g$ is called down-scattering, since a neutron loses energy in such an event. On the other hand, $g'>g$ corresponds to up-scattering: a neutron gains energy in a scattering event from the thermal motion of the atoms surrounding it; up-scattering is therefore only an important process for neutrons with kinetic energies that are already on the same order as the thermal kinetic energy (i.e. in the sub $eV$ range).
• Extraneous sources $s_{\mathrm{ext},g}$.

In operator notation, this system of equations can be written in the form

\begin{eqnarray*}
  \frac 1v \frac{\partial \phi}{\partial t}
  =
  -L\phi + F\phi + X\phi + s_{\mathrm{ext}},
 \end{eqnarray*}


            where $L,F,X$ are sinking, fission, and scattering operators, respectively. $L$ here includes both the diffusion and removal terms. Note that $L$ is symmetric, whereas $F$ and $X$ are not.


            It is well known that this equation admits a stable solution if all eigenvalues of the operator $-L+F+X$ are negative. This can be readily seen by multiplying the equation by $\phi$ and integrating over the domain, leading to

\begin{eqnarray*}
   \frac 1{2v} \frac{\partial}{\partial t}  \|\phi\|^2 = ((-L+F+X)\phi,\phi).
 \end{eqnarray*}

When assembling the right hand side for one energy group, we have to deal with terms of the form

\begin{eqnarray*}
   F_i = \int_\Omega f(x) \varphi_g^i(x) \phi_{g'}(x) \ dx,
 \end{eqnarray*}


where $f(x)$ is one of the coefficient functions $\Sigma_{s,g'\to g}$ or $\nu\chi_g\Sigma_{f,g'}$ used in the right hand side of the eigenvalue equation. The difficulty now is that $\phi_{g'}$ is defined on the mesh for energy group $g'$, i.e. it can be expanded as $\phi_{g'}(x)=\sum_j\phi_{g'}^j \varphi_{g'}^j(x)$, with basis functions $\varphi_{g'}^j(x)$ defined on mesh $g'$. The contribution to the right hand side can therefore be written as


            \begin{eqnarray*}
   F_i = \sum_j \left\{\int_\Omega f(x) \varphi_g^i(x) \varphi_{g'}^j(x)
   \ dx \right\} \phi_{g'}^j ,
 \end{eqnarray*}


On the other hand, the test functions $\varphi_g^i(x)$ are defined on mesh $g$. This means that we can't just split the integral over $\Omega$ into integrals over the cells of either mesh $g$ or $g'$, since the respectively other basis functions may not be defined on these cells.


The solution to this problem lies in the fact that both the meshes for $g$ and $g'$ are derived by adaptive refinement from a common coarse mesh. We can therefore always find a set of cells, which we denote by ${\cal T}_g \cap {\cal T}_{g'}$, that satisfy the following conditions:

• the union of these cells covers the entire domain, and

• a cell in this set is active in at least one of the two meshes.

On each cell $K$ of this set whose children $K_c$ carry the finer of the two meshes, the shape functions of the coarser mesh can be expressed in terms of those defined on the children, i.e.,

\begin{eqnarray*}
   \phi_g^i|_{K_c} = B_c^{il} \varphi_{g'}^l|_{K_c}.
 \end{eqnarray*}


Here, and in the following, summation over indices appearing twice is implied. The matrix $B_c$ is the matrix that interpolates data from a cell to its $c$-th child.


              Then we can write the contribution of cell $K$ to the right hand side component $F_i$ as

\begin{eqnarray*}
     F_i|_K
     = \sum_c \int_{K_c} f(x) \varphi_g^i(x) \varphi_{g'}^j(x) \ dx \ \phi_{g'}^j
     = \sum_c B_c^{il} \int_{K_c} f(x) \varphi_{g'}^l(x) \varphi_{g'}^j(x) \ dx \ \phi_{g'}^j
     = \sum_c (B_c M_{K_c})^{ij} \phi_{g'}^j,
   \end{eqnarray*}


where $M_{K_c}^{lj}=\int_{K_c} f(x) \varphi_{g'}^l(x) \varphi_{g'}^j(x) \ dx$ is the weighted mass matrix on child $c$ of cell $K$.


              The next question is what happens if a child $K_c$ of $K$ is not active. Then, we have to apply the process recursively, i.e. we have to interpolate the basis functions $\varphi_g^i$ onto child $K_c$ of $K$, then onto child $K_{cc'}$ of that cell, onto child $K_{cc'c''}$ of that one, etc, until we find an active cell. We then have to sum up all the contributions from all the children, grandchildren, etc, of cell $K$, with contributions of the form

\begin{eqnarray*}
     F_i|_{K_{cc'}} = (B_cB_{c'} M_{K_{cc'}})^{ij}  \phi_{g'}^j,
 \end{eqnarray*}

and so on, with one factor $B_c$ per level of refinement.
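Schematically, this recursion can be sketched as follows (a hypothetical helper with made-up names, not step-28's actual interface; assembling the weighted mass matrices themselves is left out):

  // Walk from a cell of mesh g down to the active cells of mesh g',
  // accumulating the product of interpolation matrices B_c along the way.
  // deal.II provides B_c as the prolongation matrix of the element.
  template <int dim>
  void accumulate_matrix_products(
    const typename DoFHandler<dim>::cell_iterator &cell,
    const FiniteElement<dim> &fe,
    const FullMatrix<double> &B_product,
    std::vector<FullMatrix<double>> &products_on_active_cells)
  {
    if (cell->is_active())
      {
        // Reached an active cell: B_product now equals B_c B_{c'} ... for
        // the path from the original cell down to this one, ready to be
        // combined with the weighted mass matrix M on this cell.
        products_on_active_cells.push_back(B_product);
        return;
      }

    for (unsigned int c = 0; c < cell->n_children(); ++c)
      {
        FullMatrix<double> next(fe.n_dofs_per_cell(), fe.n_dofs_per_cell());
        B_product.mmult(next, fe.get_prolongation_matrix(c));
        accumulate_matrix_products<dim>(cell->child(c), fe, next,
                                        products_on_active_cells);
      }
  }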
Obviously, the arrangement of assemblies as well as the arrangement of rods inside them affect the distribution of neutron fluxes in the reactor (a fact that will be obvious by looking at the solution shown below in the results sections of this program). Fuel rods, for example, differ from each other in the enrichment of U-235 or Pu-239. Control rods, on the other hand, have zero fission, but nonzero scattering and absorption cross sections.

This whole arrangement would make the description of spatially dependent material parameters very complicated. It will not become much simpler, but we will make one approximation: we merge the volume inhabited by each cylindrical rod and the surrounding water into volumes of quadratic cross section into so-called ‘pin cells’ for which homogenized material data are obtained with nuclear database and knowledge of neutron spectrum. The homogenization makes all material data piecewise constant on the solution domain for a reactor with fresh fuel. Spatially dependent material parameters are then looked up for the quadratic assembly in which a point is located, and then for the quadratic pin cell within this assembly.
In this tutorial program, we simulate a quarter of a reactor consisting of $4\times 4$ assemblies. We use symmetry (Neumann) boundary conditions to reduce the problem to one quarter of the domain, and consequently only simulate a $2\times 2$ set of assemblies. Two of them will be UO${}_2$ fuel, the other two of them MOX fuel. Each of these assemblies consists of $17\times 17$ rods of different compositions. In total, we therefore create a $34\times 34$ lattice of rods. To make things simpler later on, we reflect this fact by creating a coarse mesh of $34\times 34$ cells (even though the domain is a square, for which we would usually use a single cell). In deal.II, each cell has a material_id which one may use to associate each cell with a particular number identifying the material from which this cell's volume is made; we will use this material ID to identify which of the 8 different kinds of rods that are used in this testcase make up a particular cell. Note that upon mesh refinement, the children of a cell inherit the material ID, making it simple to track the material even after mesh refinement.

The arrangement of the rods will be clearly visible in the images shown in the results section. The cross sections for materials and for both energy groups are taken from an OECD/NEA benchmark problem. The detailed configuration and material data is given in the code.

              What the program does (and how it does that)

As a coarse overview of what exactly the program does, here is the basic layout: starting on a coarse mesh that is the same for each energy group, we perform inverse eigenvalue iterations to compute the $k$-eigenvalue on a given set of meshes. We stop these iterations when the change in the eigenvalue drops below a certain tolerance, and then write out the meshes and solutions for each energy group for inspection by a graphics program. Because the meshes for the solutions are different, we have to generate a separate output file for each energy group, rather than being able to add all energy group solutions into the same file.

/usr/share/doc/packages/dealii/doxygen/deal.II/step_29.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_29.html 2024-12-27 18:25:19.368946277 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_29.html 2024-12-27 18:25:19.364946249 +0000

\[
  \frac{\partial^2 U}{\partial t^2} - c^2 \Delta U = 0
 \]


where $c$ is the wave speed (that for simplicity we assume to be constant), $U = U(x,t),\;x \in \Omega,\;t\in\mathrm{R}$. The boundary $\Gamma=\partial\Omega$ is divided into two parts $\Gamma_1$ and $\Gamma_2=\Gamma\setminus\Gamma_1$, with $\Gamma_1$ representing the transducer lens and $\Gamma_2$ an absorbing boundary (that is, we want to choose boundary conditions on $\Gamma_2$ in such a way that they imitate a larger domain). On $\Gamma_1$, the transducer generates a wave of constant frequency ${\omega}>0$ and constant amplitude (that we chose to be 1 here):

              \[
 U(x,t) = \cos{\omega t}, \qquad x\in \Gamma_1
 \]

If there are no other (interior or boundary) sources, and since the only source has frequency $\omega$, then the solution admits a separation of variables of the form $U(x,t) = \textrm{Re}\left(u(x)\,e^{i\omega t}\right)$. The complex-valued function $u(x)$ describes the spatial dependency of amplitude and phase (relative to the source) of the waves of frequency ${\omega}$, with the amplitude being the quantity that we are interested in. By plugging this form of the solution into the wave equation, we see that for $u$ we have

              \begin{eqnarray*}
 -\omega^2 u(x) - c^2\Delta u(x) &=& 0, \qquad x\in\Omega,\\
 u(x) &=& 1,  \qquad x\in\Gamma_1.
 \end{eqnarray*}


              For finding suitable conditions on $\Gamma_2$ that model an absorbing boundary, consider a wave of the form $V(x,t)=e^{i(k\cdot x -\omega t)}$ with frequency ${\omega}$ traveling in direction $k\in {\mathrm{R}^2}$. In order for $V$ to solve the wave equation, $|k|={\frac{\omega}{c}}$ must hold. Suppose that this wave hits the boundary in $x_0\in\Gamma_2$ at a right angle, i.e. $n=\frac{k}{|k|}$ with $n$ denoting the outer unit normal of $\Omega$ in $x_0$. Then at $x_0$, this wave satisfies the equation


\[
 c (n\cdot\nabla V) + \frac{\partial V}{\partial t} = (i\, c\, |k| - i\, \omega) V = 0.
 \]

If we therefore impose the boundary condition

\[
 c (n\cdot\nabla U) + \frac{\partial U}{\partial t} = 0, \qquad x\in\Gamma_2,
 \]

waves that hit the boundary $\Gamma_2$ at a right angle will be perfectly absorbed. On the other hand, those parts of the wave field that do not hit a boundary at a right angle do not satisfy this condition, and enforcing it as a boundary condition will yield partial reflections, i.e. only parts of the wave will pass through the boundary as if it weren't there, whereas the remaining fraction of the wave will be reflected back into the domain.


              If we are willing to accept this as a sufficient approximation to an absorbing boundary we finally arrive at the following problem for $u$:


              \begin{eqnarray*}
 -\omega^2 u - c^2\Delta u &=& 0, \qquad x\in\Omega,\\
 c (n\cdot\nabla u) + i\,\omega\,u &=&0, \qquad x\in\Gamma_2,\\
 u &=& 1,  \qquad x\in\Gamma_1.
 \end{eqnarray*}


This is a Helmholtz equation (similar to the one in step-7, but this time with "the bad sign") with Dirichlet data on $\Gamma_1$ and mixed boundary conditions on $\Gamma_2$. Because of the condition on $\Gamma_2$, we cannot just treat the equations for real and imaginary parts of $u$ separately. What we can do however is to view the PDE for $u$ as a system of two PDEs for the real and imaginary parts of $u$, with the boundary condition on $\Gamma_2$ representing the coupling terms between the two components of the system. This works along the following lines: Let $v=\textrm{Re}\;u,\; w=\textrm{Im}\;u$, then in terms of $v$ and $w$ we have the following system:


\begin{eqnarray*}
   \left.\begin{array}{ccc}
     -\omega^2 v - c^2\Delta v &=& 0 \quad\\
     -\omega^2 w - c^2\Delta w &=& 0 \quad
   \end{array}\right\} &\;& x\in\Omega,
   \\
   \left.\begin{array}{ccc}
     c(n\cdot\nabla v) - \omega w &=& 0 \quad\\
     c(n\cdot\nabla w) + \omega v &=& 0 \quad
   \end{array}\right\} &\;& x\in\Gamma_2,
   \\
   \left.\begin{array}{ccc}
     v &=& 1 \quad\\
     w &=& 0 \quad
   \end{array}\right\} &\;& x\in\Gamma_1.
 \end{eqnarray*}
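The boundary rows on $\Gamma_2$ come from taking real and imaginary parts of the absorbing boundary condition; as a small worked step, with $u=v+iw$,

\[
  0 = c (n\cdot\nabla u) + i\,\omega\,u
    = \left[ c (n\cdot\nabla v) - \omega w \right]
    + i \left[ c (n\cdot\nabla w) + \omega v \right],
 \]

and both brackets have to vanish separately.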


              For test functions $\phi,\psi$ with $\phi|_{\Gamma_1}=\psi|_{\Gamma_1}=0$, after the usual multiplication, integration over $\Omega$ and applying integration by parts, we get the weak formulation


\begin{eqnarray*}
 -\omega^2 \langle \phi, v \rangle_{\mathrm{L}^2(\Omega)}
 + c^2 \langle \nabla \phi, \nabla v \rangle_{\mathrm{L}^2(\Omega)}
 - c \omega \langle \phi, w \rangle_{\mathrm{L}^2(\Gamma_2)} &=& 0,
 \\
 -\omega^2 \langle \psi, w \rangle_{\mathrm{L}^2(\Omega)}
 + c^2 \langle \nabla \psi, \nabla w \rangle_{\mathrm{L}^2(\Omega)}
 + c \omega \langle \psi, v \rangle_{\mathrm{L}^2(\Gamma_2)} &=& 0.
 \end{eqnarray*}

We choose finite element spaces $V_h$ and $W_h$ with bases $\{\phi_j\}_{j=1}^n, \{\psi_j\}_{j=1}^n$ and look for approximate solutions

\[
 v_h = \sum_{j=1}^n \alpha_j \phi_j, \;\; w_h = \sum_{j=1}^n \beta_j \psi_j.
 \]

Plugging $v_h$ and $w_h$ into the weak formulation and testing with each basis function yields a linear system for the coefficient vectors $\alpha$ and $\beta$ with the block structure

\[
 \left(
 \begin{array}{cc}
  A & -B \\
  B & A
 \end{array}
 \right)
 \left(
 \begin{array}{c}
  \alpha \\ \beta
 \end{array}
 \right)
 =
 \left(
 \begin{array}{c}
  0 \\ 0
 \end{array}
 \right),
 \]

where the matrix $A$ collects the $-\omega^2$ mass and $c^2$ stiffness terms on $\Omega$, and $B$ the $c\omega$ boundary mass terms on $\Gamma_2$.


              (One should not be fooled by the right hand side being zero here, that is because we haven't included the Dirichlet boundary data yet.) Because of the alternating sign in the off-diagonal blocks, we can already see that this system is non-symmetric, in fact it is even indefinite. Of course, there is no necessity to choose the spaces $V_h$ and $W_h$ to be the same. However, we expect real and imaginary part of the solution to have similar properties and will therefore indeed take $V_h=W_h$ in the implementation, and also use the same basis functions $\phi_i = \psi_i$ for both spaces. The reason for the notation using different symbols is just that it allows us to distinguish between shape functions for $v$ and $w$, as this distinction plays an important role in the implementation.


              The test case


              For the computations, we will consider wave propagation in the unit square, with ultrasound generated by a transducer lens that is shaped like a segment of the circle with center at $(0.5, d)$ and a radius slightly greater than $d$; this shape should lead to a focusing of the sound wave at the center of the circle. Varying $d$ changes the "focus" of the lens and affects the spatial distribution of the intensity of $u$, where our main concern is how well $|u|=\sqrt{v^2+w^2}$ is focused.


              In the program below, we will implement the complex-valued Helmholtz equations using the formulation with split real and imaginary parts. We will also discuss how to generate a domain that looks like a square with a slight bulge simulating the transducer (in the UltrasoundProblem<dim>::make_grid() function), and how to generate graphical output that not only contains the solution components $v$ and $w$, but also the magnitude $\sqrt{v^2+w^2}$ directly in the output file (in UltrasoundProblem<dim>::output_results()). Finally, we use the ParameterHandler class to easily read parameters like the focal distance $d$, wave speed $c$, frequency $\omega$, and a number of other parameters from an input file at run-time, rather than fixing those parameters in the source code where we would have to re-compile every time we want to change parameters.

              +

              For the computations, we will consider wave propagation in the unit square, with ultrasound generated by a transducer lens that is shaped like a segment of the circle with center at $(0.5, d)$ and a radius slightly greater than $d$; this shape should lead to a focusing of the sound wave at the center of the circle. Varying $d$ changes the "focus" of the lens and affects the spatial distribution of the intensity of $u$, where our main concern is how well $|u|=\sqrt{v^2+w^2}$ is focused.

              In the program below, we will implement the complex-valued Helmholtz equations using the formulation with split real and imaginary parts. We will also discuss how to generate a domain that looks like a square with a slight bulge simulating the transducer (in the UltrasoundProblem<dim>::make_grid() function), and how to generate graphical output that not only contains the solution components $v$ and $w$, but also the magnitude $\sqrt{v^2+w^2}$ directly in the output file (in UltrasoundProblem<dim>::output_results()). Finally, we use the ParameterHandler class to easily read parameters like the focal distance $d$, wave speed $c$, frequency $\omega$, and a number of other parameters from an input file at run-time, rather than fixing those parameters in the source code where we would have to re-compile every time we want to change parameters.
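As a small taste of what the ParameterHandler part looks like in code, here is a minimal declare/parse/query round trip; the entry name, default value, and file name are illustrative stand-ins rather than the exact ones used by the program:

  ParameterHandler prm;
  prm.declare_entry("focal distance", "0.3", Patterns::Double(0),
                    "Distance of the lens focus from the transducer");
  prm.parse_input("parameters.prm"); // read the values from an input file at run-time
  const double d = prm.get_double("focal distance");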

              The commented program

              Include files

              The following header files have all been discussed before:

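The diff elides the actual list at this point; for orientation, a representative subset of deal.II headers that a program like this relies on might look as follows (a sketch, not the verbatim list):

  #include <deal.II/base/function.h>
  #include <deal.II/base/parameter_handler.h>
  #include <deal.II/base/quadrature_lib.h>
  #include <deal.II/dofs/dof_handler.h>
  #include <deal.II/fe/fe_q.h>
  #include <deal.II/fe/fe_system.h>
  #include <deal.II/fe/fe_values.h>
  #include <deal.II/grid/grid_generator.h>
  #include <deal.II/grid/tria.h>
  #include <deal.II/lac/sparse_direct.h>
  #include <deal.II/lac/sparse_matrix.h>
  #include <deal.II/lac/vector.h>
  #include <deal.II/numerics/data_out.h>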

          The DirichletBoundaryValues class

          First we define a class for the function representing the Dirichlet boundary values. This has been done many times before and therefore does not need much explanation.

          Since there are two values $v$ and $w$ that need to be prescribed at the boundary, we have to tell the base class that this is a vector-valued function with two components, and the vector_value function and its cousin vector_value_list must return vectors with two entries. In our case the function is very simple, it just returns 1 for the real part $v$ and 0 for the imaginary part $w$ regardless of the point where it is evaluated.

            template <int dim>
            class DirichletBoundaryValues : public Function<dim>
            {
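  // The rest of the class declaration is elided in this diff. Based on the
  // description above, it plausibly continues along these lines (a sketch,
  // not the verbatim tutorial code):
  public:
    DirichletBoundaryValues()
      : Function<dim>(2) // vector-valued: two components, v and w
    {}

    virtual void vector_value(const Point<dim> &p,
                              Vector<double>   &values) const override
    {
      (void)p;       // the boundary values do not depend on the point
      values(0) = 1; // real part v
      values(1) = 0; // imaginary part w
    }
  };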
           
          The next subsection is devoted to the physical parameters appearing in the equation, which are the frequency $\omega$ and wave speed $c$. Again, both need to lie in the half-open interval $[0,\infty)$ represented by calling the Patterns::Double class with only the left end-point as argument:

  prm.enter_subsection("Physical constants");
  {
    prm.declare_entry("c", "1.5e5", Patterns::Double(0), "Wave speed");
           
           

          The ComputeIntensity class

          As mentioned in the introduction, the quantity that we are really after is the spatial distribution of the intensity of the ultrasound wave, which corresponds to $|u|=\sqrt{v^2+w^2}$. Now we could just be content with having $v$ and $w$ in our output, and use a suitable visualization or postprocessing tool to derive $|u|$ from the solution we computed. However, there is also a way to output data derived from the solution in deal.II, and we are going to make use of this mechanism here.

          So far we have always used the DataOut::add_data_vector function to add vectors containing output data to a DataOut object. There is a special version of this function that in addition to the data vector has an additional argument of type DataPostprocessor. What happens when this function is used for output is that at each point where output data is to be generated, the DataPostprocessor::evaluate_scalar_field() or DataPostprocessor::evaluate_vector_field() function of the specified DataPostprocessor object is invoked to compute the output quantities from the values, the gradients and the second derivatives of the finite element function represented by the data vector (in the case of face related data, normal vectors are available as well). Hence, this allows us to output any quantity that can locally be derived from the values of the solution and its derivatives. Of course, the ultrasound intensity $|u|$ is such a quantity and its computation doesn't even involve any derivatives of $v$ or $w$.

In practice, the DataPostprocessor class only provides an interface to this functionality, and we need to derive our own class from it in order to implement the functions specified by the interface. In the most general case one has to implement several member functions, but if the output quantity is a single scalar then some of this boilerplate code can be handled by a more specialized class, DataPostprocessorScalar, and we can derive from that one instead. This is what the ComputeIntensity class does:

  template <int dim>
  class ComputeIntensity : public DataPostprocessorScalar<dim>
  {
  public:
    ComputeIntensity();
    virtual void evaluate_vector_field(
      const DataPostprocessorInputs::Vector<dim> &input_data,
      std::vector<Vector<double>> &computed_quantities) const override;

          In the constructor, we need to call the constructor of the base class with two arguments. The first denotes the name by which the single scalar quantity computed by this class should be represented in output files. In our case, the postprocessor has $|u|$ as output, so we use "Intensity".

          The second argument is a set of flags that indicate which data is needed by the postprocessor in order to compute the output quantities. This can be any subset of update_values, update_gradients and update_hessians (and, in the case of face data, also update_normal_vectors), which are documented in UpdateFlags. Of course, computation of the derivatives requires additional resources, so only the flags for data that are really needed should be given here, just as we do when we use FEValues objects. In our case, only the function values of $v$ and $w$ are needed to compute $|u|$, so we're good with the update_values flag.

            template <int dim>
            ComputeIntensity<dim>::ComputeIntensity()
            : DataPostprocessorScalar<dim>("Intensity", update_values)
           
          The actual postprocessing happens in the following function. Its input is an object that stores values of the function (which is here vector-valued) representing the data vector given to DataOut::add_data_vector, evaluated at all evaluation points where we generate output, and some tensor objects representing derivatives (that we don't use here since $|u|$ is computed from just $v$ and $w$). The derived quantities are returned in the computed_quantities vector. Remember that this function may only use data for which the respective update flag is specified by get_needed_update_flags. For example, we may not use the derivatives here, since our implementation of get_needed_update_flags requests that only function values are provided.

  template <int dim>
  void ComputeIntensity<dim>::evaluate_vector_field(
    const DataPostprocessorInputs::Vector<dim> &inputs,
    std::vector<Vector<double>> &computed_quantities) const
  {
    AssertDimension(computed_quantities.size(), inputs.solution_values.size());
           
          The computation itself is straightforward: We iterate over each entry in the output vector and compute $|u|$ from the corresponding values of $v$ and $w$. We do this by creating a complex number $u$ and then calling std::abs() on the result. (One may be tempted to call std::norm(), but in a historical quirk, the C++ committee decided that std::norm() should return the square of the absolute value – thereby not satisfying the properties mathematicians require of something called a "norm".)

  for (unsigned int p = 0; p < computed_quantities.size(); ++p)
    {
      AssertDimension(computed_quantities[p].size(), 1);
      const std::complex<double> u(inputs.solution_values[p](0),
                                   inputs.solution_values[p](1));
      computed_quantities[p](0) = std::abs(u);
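As an aside, the std::abs()/std::norm() distinction is easy to check in a stand-alone snippet:

  #include <complex>
  #include <iostream>

  int main()
  {
    const std::complex<double> u(3., 4.);
    std::cout << std::abs(u) << '\n';  // prints 5: the magnitude |u|
    std::cout << std::norm(u) << '\n'; // prints 25: the *squared* magnitude
  }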
          The constructor takes the ParameterHandler object and stores it in a reference. It also initializes the DoF-Handler and the finite element system, which consists of two copies of the scalar $Q_1$ field, one for $v$ and one for $w$. In other words, we want the finite element space $Q_1\times Q_1 = Q_1^2$, which is easily constructed and passed as the constructor argument to the FESystem class (i.e., the type of the fe member being initialized here):

            template <int dim>
            UltrasoundProblem<dim>::UltrasoundProblem(ParameterHandler &param)
            : prm(param)
            dofs_per_cell = fe.n_dofs_per_cell();
           
          The FEValues objects will evaluate the shape functions for us. For the part of the bilinear form that involves integration on $\Omega$, we'll need the values and gradients of the shape functions, and of course the quadrature weights. For the terms involving the boundary integrals, only shape function values and the quadrature weights are necessary.

  FEValues<dim> fe_values(fe,
                          quadrature_formula,
                          update_values | update_gradients | update_JxW_values);
            {
            for (unsigned int j = 0; j < dofs_per_cell; ++j)
            {
          At this point, it is important to keep in mind that we are dealing with a finite element system with two components. Due to the way we constructed this FESystem, namely as the Cartesian product of two scalar finite element fields, each shape function has only a single nonzero component (they are, in deal.II lingo, primitive). Hence, each shape function can be viewed as one of the $\phi$'s or $\psi$'s from the introduction, and similarly the corresponding degrees of freedom can be attributed to either $\alpha$ or $\beta$. As we iterate through all the degrees of freedom on the current cell however, they do not come in any particular order, and so we cannot decide right away whether the DoFs with index $i$ and $j$ belong to the real or imaginary part of our solution. On the other hand, if you look at the form of the system matrix in the introduction, this distinction is crucial since it will determine to which block in the system matrix the contribution of the current pair of DoFs will go and hence which quantity we need to compute from the given two shape functions. Fortunately, the FESystem object can provide us with this information, namely it has a function FESystem::system_to_component_index(), that for each local DoF index returns a pair of integers of which the first indicates to which component of the system the DoF belongs. The second integer of the pair indicates which index the DoF has in the scalar base finite element field, but this information is not relevant here. If you want to know more about this function and the underlying scheme behind primitive vector valued elements, take a look at step-8 or the Handling vector valued problems topic, where these topics are explained in depth.

  if (fe.system_to_component_index(i).first ==
      fe.system_to_component_index(j).first)
    {
          If both DoFs $i$ and $j$ belong to same component, i.e. their shape functions are both $\phi$'s or both $\psi$'s, the contribution will end up in one of the diagonal blocks in our system matrix, and since the corresponding entries are computed by the same formula, we do not bother if they actually are $\phi$ or $\psi$ shape functions. We can simply compute the entry by iterating over all quadrature points and adding up their contributions, where values and gradients of the shape functions are supplied by our FEValues object.

  for (unsigned int q_point = 0; q_point < n_q_points;
       ++q_point)
    cell_matrix(i, j) +=
            fe.has_support_on_face(i, face_no) &&
            fe.has_support_on_face(j, face_no))

          The check whether shape functions have support on a face is not strictly necessary: if we don't check for it we would simply add up terms to the local cell matrix that happen to be zero because at least one of the shape functions happens to be zero. However, we can save that work by adding the checks above.

In either case, these DoFs will contribute to the boundary integrals in the off-diagonal blocks of the system matrix. To compute the integral, we loop over all the quadrature points on the face and sum up the contribution weighted with the quadrature weights that the face quadrature rule provides. In contrast to the entries on the diagonal blocks, here it does matter which one of the shape functions is a $\psi$ and which one is a $\phi$, since that will determine the sign of the entry. We account for this by a simple conditional statement that determines the correct sign. Since we have already checked that DoFs $i$ and $j$ belong to different components, it suffices here to test which component one of them belongs to.

  for (unsigned int q_point = 0; q_point < n_face_q_points;
       ++q_point)
    cell_matrix(i, j) +=

          UltrasoundProblem::output_results

          Here we output our solution $v$ and $w$ as well as the derived quantity $|u|$ in the format specified in the parameter file. Most of the work for deriving $|u|$ from $v$ and $w$ was already done in the implementation of the ComputeIntensity class, so that the output routine is rather straightforward and very similar to what is done in the previous tutorials.

            template <int dim>
            void UltrasoundProblem<dim>::output_results() const
            {
           
            std::ofstream output(filename);
           
          The solution vectors $v$ and $w$ are added to the DataOut object in the usual way:

            std::vector<std::string> solution_names;
            solution_names.emplace_back("Re_u");
            solution_names.emplace_back("Im_u");
[Result images: real part, imaginary part, and intensity $|u|$ of the solution, tabulated by finite element order]

    The first two pictures show the real and imaginary parts of $u$, whereas the last shows the intensity $|u|$. One can clearly see that the intensity is focused around the focal point of the lens (0.5, 0.3), and that the focus is rather sharp in $x$-direction but more blurred in $y$-direction, which is a consequence of the geometry of the focusing lens, its finite aperture, and the wave nature of the problem.

    Because colorful graphics are always fun, and to stress the focusing effects some more, here is another set of images highlighting how well the intensity is actually focused in $x$-direction:

/usr/share/doc/packages/dealii/doxygen/deal.II/step_3.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_3.html 2024-12-27 18:25:19.424946661 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_3.html 2024-12-27 18:25:19.432946716 +0000

    We will solve this equation on the square, $\Omega=[-1,1]^2$, for which you've already learned how to generate a mesh in step-1 and step-2. In this program, we will also only consider the particular case $f(\mathbf x)=1$ and come back to how to implement the more general case in the next tutorial program, step-4.

    If you've learned about the basics of the finite element method, you will remember the steps we need to take to approximate the solution $u$ by a finite dimensional approximation. Specifically, we first need to derive the weak form of the equation above, which we obtain by multiplying the equation by a test function $\varphi$ from the left (we will come back to the reason for multiplying from the left and not from the right below) and integrating over the domain $\Omega$:

    \begin{align*}
   -\int_\Omega \varphi \Delta u = \int_\Omega \varphi f.
\end{align*}

    The test function $\varphi$ has to satisfy the same kind of boundary conditions (in mathematical terms: it needs to come from the tangent space of the set in which we seek the solution), so on the boundary $\varphi=0$ and consequently the weak form we are looking for reads

    \begin{align*}
   (\nabla\varphi, \nabla u)
    = (\varphi, f),
 \end{align*}

    where we have used the common notation $(a,b)=\int_\Omega a\; b$. The problem then asks for a function $u$ for which this statement is true for all test functions $\varphi$ from the appropriate space (which here is the space $H^1$).

Of course we can't find such a function on a computer in the general case, and instead we seek an approximation $u_h(\mathbf x)=\sum_j U_j \varphi_j(\mathbf x)$, where the $U_j$ are unknown expansion coefficients we need to determine (the "degrees of freedom" of this problem), and $\varphi_i(\mathbf x)$ are the finite element shape functions we will use. To define these shape functions, we need the following:

    • A mesh on which to define shape functions. You have already seen how to generate and manipulate the objects that describe meshes in step-1 and step-2.
    • A finite element that describes the shape functions we want to use on the reference cell (which in deal.II is always the unit interval $[0,1]$, the unit square $[0,1]^2$ or the unit cube $[0,1]^3$, depending on which space dimension you work in). In step-2, we had already used an object of type FE_Q<2>, which denotes the usual Lagrange elements that define shape functions by interpolation on support points. The simplest one is FE_Q<2>(1), which uses polynomial degree 1. In 2d, these are often referred to as bilinear, since they are linear in each of the two coordinates of the reference cell. (In 1d, they would be linear and in 3d tri-linear; however, in the deal.II documentation, we will frequently not make this distinction and simply always call these functions "linear".)
    • A DoFHandler object that enumerates all the degrees of freedom on the mesh, taking the reference cell description the finite element object provides as the basis. You've also already seen how to do this in step-2.
    • A mapping that tells how the shape functions on the real cell are obtained from the shape functions defined by the finite element class on the reference cell. By default, unless you explicitly say otherwise, deal.II will use a (bi-, tri-)linear mapping for this, so in most cases you don't have to worry about this step.
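Condensed into code, and with the global refinement level chosen here so as to match the $32\times 32$ mesh mentioned below, these ingredients look roughly like this:

  Triangulation<2> triangulation;
  GridGenerator::hyper_cube(triangulation, -1, 1); // the domain [-1,1]^2
  triangulation.refine_global(5);                  // 2^5 = 32 cells per direction

  FE_Q<2>       fe(1);                             // bilinear shape functions
  DoFHandler<2> dof_handler(triangulation);
  dof_handler.distribute_dofs(fe);                 // here: 33^2 = 1089 unknowns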
    Through these steps, we now have a set of functions $\varphi_i$, and we can define the weak form of the discrete problem: Find a function $u_h$, i.e., find the expansion coefficients $U_j$ mentioned above, so that

\begin{align*}
  (\nabla\varphi_i, \nabla u_h)
  = (\varphi_i, f),
\end{align*}

which can be written in matrix notation as

\begin{align*}
  A U = F,
\end{align*}

    where the matrix $A$ and the right hand side $F$ are defined as

\begin{align*}
  A_{ij} &= (\nabla\varphi_i, \nabla \varphi_j),
  \\
  F_i &= (\varphi_i, f).
\end{align*}

    Assembling the matrix and right hand side vector

    Now we know what we need (namely: objects that hold the matrix and vectors, as well as ways to compute $A_{ij},F_i$), and we can look at what it takes to make that happen:

    • The object for $A$ is of type SparseMatrix while those for $U$ and $F$ are of type Vector. We will see in the program below what classes are used to solve linear systems.
    • We need a way to form the integrals. In the finite element method, this is most commonly done using quadrature, i.e. the integrals are replaced by a weighted sum over a set of quadrature points on each cell. That is, we first split the integral over $\Omega$ into integrals over all cells,

\begin{align*}
  A_{ij} &= (\nabla\varphi_i, \nabla \varphi_j)
  = \sum_{K \in {\mathbb T}} \int_K \nabla\varphi_i \cdot \nabla \varphi_j
  \approx \sum_{K \in {\mathbb T}} \sum_q
    \nabla\varphi_i(\mathbf x^K_q) \cdot \nabla \varphi_j(\mathbf x^K_q) w^K_q,
  \\
  F_i &= (\varphi_i, f)
  = \sum_{K \in {\mathbb T}} \int_K \varphi_i f
  \approx \sum_{K \in {\mathbb T}} \sum_q
    \varphi_i(\mathbf x^K_q) f(\mathbf x^K_q) w^K_q,
\end{align*}

where $\mathbb{T} \approx \Omega$ is a Triangulation approximating the domain, $\mathbf x^K_q$ is the $q$th quadrature point on cell $K$, and $w^K_q$ the $q$th quadrature weight. There are different parts to what is needed in doing this, and we will discuss them in turn next.
    • First, we need a way to describe the location $\mathbf x_q^K$ of quadrature points and their weights $w^K_q$. They are usually mapped from the reference cell in the same way as shape functions, i.e., implicitly using the MappingQ1 class or, if you explicitly say so, through one of the other classes derived from Mapping. The locations and weights on the reference cell are described by objects derived from the Quadrature base class. Typically, one chooses a quadrature formula (i.e. a set of points and weights) so that the quadrature exactly equals the integral in the matrix; this can be achieved because all factors in the integral are polynomial, and is done by Gaussian quadrature formulas, implemented in the QGauss class.
• We then need something that can help us evaluate $\varphi_i(\mathbf x^K_q)$ on cell $K$. This is what the FEValues class does: it takes a finite element object to describe $\varphi$ on the reference cell, a quadrature object to describe the quadrature points and weights, and a mapping object (or implicitly takes the MappingQ1 class) and provides values and derivatives of the shape functions on the real cell $K$, as well as all sorts of other information needed for integration, at the quadrature points located on $K$.

    The process of computing the matrix and right hand side as a sum over all cells (and then a sum over quadrature points) is usually called assembling the linear system, or assembly for short, using the meaning of the word related to assembly line, meaning "the act of putting together a set of pieces, fragments, or elements".

    FEValues really is the central class in the assembly process. One way you can view it is as follows: The FiniteElement and derived classes describe shape functions, i.e., infinite dimensional objects: functions have values at every point. We need this for theoretical reasons because we want to perform our analysis with integrals over functions. However, for a computer, this is a very difficult concept, since they can in general only deal with a finite amount of information, and so we replace integrals by sums over quadrature points that we obtain by mapping (the Mapping object) using points defined on a reference cell (the Quadrature object) onto points on the real cell. In essence, we reduce the problem to one where we only need a finite amount of information, namely shape function values and derivatives, quadrature weights, normal vectors, etc, exclusively at a finite set of points. The FEValues class is the one that brings the three components together and provides this finite set of information on a particular cell $K$. You will see it in action when we assemble the linear system below.

    Note
    The preceding overview of all the important steps of any finite element implementation has its counterpart in deal.II: The library can naturally be grouped into a number of "topics" that cover the basic concepts just outlined. You can access these topics through the "Topics" tab at the top of this page. An overview of the most fundamental groups of concepts is also available on the front page of the deal.II manual.

    Solving the linear system

    For a finite element program, the linear system we end up with here is relatively small: The matrix has size $1089 \times 1089$, owing to the fact that the mesh we use is $32\times 32$ and so there are $33^2=1089$ vertices in the mesh. In many of the later tutorial programs, matrix sizes in the range of tens of thousands to hundreds of thousands will not be uncommon, and with codes such as ASPECT that build on deal.II, we regularly solve problems with more than a hundred million equations (albeit using parallel computers). In any case, even for the small system here, the matrix is much larger than what one typically encounters in an undergraduate or most graduate courses, and so the question arises how we can solve such linear systems.

    The first method one typically learns for solving linear systems is Gaussian elimination. The problem with this method is that it requires a number of operations that is proportional to $N^3$, where $N$ is the number of equations or unknowns in the linear system – more specifically, the number of operations is $\frac 23 N^3$, give or take a few. With $N=1089$, this means that we would have to do around $861$ million operations. This is a number that is quite feasible and it would take modern processors less than 0.1 seconds to do this. But it is clear that this isn't going to scale: If we have twenty times as many equations in the linear system (that is, twenty times as many unknowns), then it would already take 1000-10,000 seconds or on the order of an hour. Make the linear system another ten times larger, and it is clear that we can not solve it any more on a single computer.

    One can rescue the situation somewhat by realizing that only a relatively small number of entries in the matrix are nonzero – that is, the matrix is sparse. Variations of Gaussian elimination can exploit this, making the process substantially faster; we will use one such method – implemented in the SparseDirectUMFPACK class – in step-29 for the first time, among several others than come after that. These variations of Gaussian elimination might get us to problem sizes on the order of 100,000 or 200,000, but not all that much beyond that.
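For later reference, using that class takes only a few lines; a sketch, assuming the matrix and vectors are named system_matrix, solution, and system_rhs as in the programs of this series:

  SparseDirectUMFPACK direct_solver;
  direct_solver.initialize(system_matrix);   // compute a sparse LU decomposition
  direct_solver.vmult(solution, system_rhs); // apply the inverse to the right hand side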

    Instead, what we will do here is take up an idea from 1952: the Conjugate Gradient method, or in short "CG". CG is an "iterative" solver in that it forms a sequence of vectors that converge to the exact solution; in fact, after $N$ such iterations in the absence of roundoff errors it finds the exact solution if the matrix is symmetric and positive definite. The method was originally developed as another way to solve a linear system exactly, like Gaussian elimination, but as such it had few advantages and was largely forgotten for a few decades. But, when computers became powerful enough to solve problems of a size where Gaussian elimination doesn't work well any more (sometime in the 1980s), CG was rediscovered as people realized that it is well suited for large and sparse systems like the ones we get from the finite element method. This is because (i) the vectors it computes converge to the exact solution, and consequently we do not actually have to do all $N$ iterations to find the exact solution as long as we're happy with reasonably good approximations; and (ii) it only ever requires matrix-vector products, which is very useful for sparse matrices because a sparse matrix has, by definition, only ${\cal O}(N)$ entries and so a matrix-vector product can be done with ${\cal O}(N)$ effort whereas it costs $N^2$ operations to do the same for dense matrices. As a consequence, we can hope to solve linear systems with at most ${\cal O}(N^2)$ operations, and in many cases substantially fewer.

    Finite element codes therefore almost always use iterative solvers such as CG for the solution of the linear systems, and we will do so in this code as well. (We note that the CG method is only usable for matrices that are symmetric and positive definite; for other equations, the matrix may not have these properties and we will have to use other variations of iterative solvers such as BiCGStab or GMRES that are applicable to more general matrices.)

An important component of these iterative solvers is that we specify the tolerance with which we want to solve the linear system – in essence, a statement about the error we are willing to accept in our approximate solution. The error of an approximate solution $\tilde x$, relative to the exact solution $x$ of a linear system $Ax=b$, is defined as $\|x-\tilde x\|$, but this is a quantity we cannot compute because we don't know the exact solution $x$. Instead, we typically consider the residual, defined as $\|b-A\tilde x\|=\|A(x-\tilde x)\|$, as a computable measure. We then let the iterative solver compute more and more accurate solutions $\tilde x$, until $\|b-A\tilde x\|\le \tau$. A practical question is what value $\tau$ should have. In most applications, setting

    \begin{align*}
   \tau = 10^{-6} \|b\|
 \end{align*}

    is a reasonable choice. The fact that we make $\tau$ proportional to the size (norm) of $b$ makes sure that our expectations of the accuracy in the solution are relative to the size of the solution. This makes sense: If we make the right hand side $b$ ten times larger, then the solution $x$ of $Ax=b$ will also be ten times larger, and so will $\tilde x$; we want the same number of accurate digits in $\tilde x$ as before, which means that we should also terminate when the residual $\|b-A\tilde x\|$ is ten times the original size – which is exactly what we get if we make $\tau$ proportional to $\|b\|$.

    All of this will be implemented in the Step3::solve() function in this program. As you will see, it is quite simple to set up linear solvers with deal.II: The whole function will have only three lines.
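Indeed, a CG solve fits in three statements; a sketch, assuming the member variables system_matrix, solution, and system_rhs of the class shown below, and using the tolerance $\tau = 10^{-6}\|b\|$ just discussed:

  SolverControl            solver_control(1000, 1e-6 * system_rhs.l2_norm());
  SolverCG<Vector<double>> solver(solver_control);
  solver.solve(system_matrix, solution, system_rhs, PreconditionIdentity());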

    About the implementation

    Although this is the simplest possible equation you can solve using the finite element method, this program shows the basic structure of most finite element programs and also serves as the template that almost all of the following programs will essentially follow. Specifically, the main class of this program looks like this:

    class Step3

    Now it is time to start integration over the cell, which we do by looping over all quadrature points, which we will number by q_index.

      for (const unsigned int q_index : fe_values.quadrature_point_indices())
      {
    First assemble the matrix: For the Laplace problem, the matrix on each cell is the integral over the gradients of shape function i and j. Since we do not integrate, but rather use quadrature, this is the sum over all quadrature points of the integrands times the determinant of the Jacobian matrix at the quadrature point times the weight of this quadrature point. You can get the gradient of shape function $i$ at quadrature point with number q_index by using fe_values.shape_grad(i,q_index); this gradient is a 2-dimensional vector (in fact it is of type Tensor<1,dim>, with here dim=2) and the product of two such vectors is the scalar product, i.e. the product of the two shape_grad function calls is the dot product. This is in turn multiplied by the Jacobian determinant and the quadrature point weight (that one gets together by the call to FEValues::JxW() ). Finally, this is repeated for all shape functions $i$ and $j$:

  for (const unsigned int i : fe_values.dof_indices())
    for (const unsigned int j : fe_values.dof_indices())
      cell_matrix(i, j) +=
        (fe_values.shape_grad(i, q_index) * // grad phi_i(x_q)
         fe_values.shape_grad(j, q_index) * // grad phi_j(x_q)
         fe_values.JxW(q_index));           // dx
[Figure: visualization of the solution of step-3]

    It shows both the solution and the mesh, elevated above the $x$- $y$ plane based on the value of the solution at each point. Of course the solution here is not particularly exciting, but that is a result of both what the Laplace equation represents and the right hand side $f(\mathbf x)=1$ we have chosen for this program: The Laplace equation describes (among many other uses) the vertical deformation of a membrane subject to an external (also vertical) force. In the current example, the membrane's borders are clamped to a square frame with no vertical variation; a constant force density will therefore intuitively lead to a membrane that simply bulges upward – like the one shown above.

    VisIt and Paraview both allow playing with various kinds of visualizations of the solution. Several video lectures show how to use these programs. See also video lecture 11, video lecture 32.

    Possibilities for extensions

    If you want to play around a little bit with this program, here are a few suggestions:

    solution = np.array(file["/solution"])
    x, y = nodes.T
    The following stores the $x$ and $y$ coordinates of each node of each cell in one flat array.

    cell_x = x[cells.flatten()]
    cell_y = y[cells.flatten()]

The following tags the cell ids. Each four entries correspond to one cell. Then we collect the coordinates and ids into a data frame.

    n_cells = cells.shape[0]
    cell_ids = np.repeat(np.arange(n_cells), 4)
/usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html 2024-12-27 18:25:19.500947183 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html 2024-12-27 18:25:19.504947211 +0000

    Motivation

    Adaptive local refinement is used to obtain fine meshes which are well adapted to solving the problem at hand efficiently. In short, the size of cells which produce a large error is reduced to obtain a better approximation of the solution to the problem at hand. However, a lot of problems contain anisotropic features. Prominent examples are shocks or boundary layers in compressible viscous flows. An efficient mesh approximates these features with cells of higher aspect ratio which are oriented according to the mentioned features. Using only isotropic refinement, the aspect ratios of the original mesh cells are preserved, as they are inherited by the children of a cell. Thus, starting from an isotropic mesh, a boundary layer will be refined in order to catch the rapid variation of the flow field in the wall normal direction, thus leading to cells with very small edge lengths both in normal and tangential direction. Usually, much higher edge lengths in tangential direction and thus significantly less cells could be used without a significant loss in approximation accuracy. An anisotropic refinement process can modify the aspect ratio from mother to child cells by a factor of two for each refinement step. In the course of several refinements, the aspect ratio of the fine cells can be optimized, saving a considerable number of cells and correspondingly degrees of freedom and thus computational resources, memory as well as CPU time.

    Implementation

    Most of the time, when we do finite element computations, we only consider one cell at a time, for example to calculate cell contributions to the global matrix, or to interpolate boundary values. However, sometimes we have to look at how cells are related in our algorithms. Relationships between cells come in two forms: neighborship and mother-child relationship. For the case of isotropic refinement, deal.II uses certain conventions (invariants) for cell relationships that are always maintained. For example, a refined cell always has exactly $2^{dim}$ children. And (except for the 1d case), two neighboring cells may differ by at most one refinement level: they are equally often refined or one of them is exactly once more refined, leaving exactly one hanging node on the common face. Almost all of the time these invariants are only of concern in the internal implementation of the library. However, there are cases where knowledge of them is also relevant to an application program.

    +

    In the current context, it is worth noting that the kind of mesh refinement affects some of the most fundamental assumptions. Consequently, some of the usual code found in application programs will need modifications to exploit the features of meshes which were created using anisotropic refinement. For those interested in how deal.II evolved, it may be of interest that the loosening of such invariants required some incompatible changes. For example, the library used to have a member GeometryInfo<dim>::children_per_cell that specified how many children a cell has once it is refined. For isotropic refinement, this number is equal to $2^{dim}$, as mentioned above. However, for anisotropic refinement, this number does not exist, as is can be either two or four in 2D and two, four or eight in 3D, and the member GeometryInfo<dim>::children_per_cell has consequently been removed. It has now been replaced by GeometryInfo<dim>::max_children_per_cell which specifies the maximum number of children a cell can have. How many children a refined cell has was previously available as static information, but now it depends on the actual refinement state of a cell and can be retrieved using TriaAccessor::n_children(), a call that works equally well for both isotropic and anisotropic refinement. A very similar situation can be found for faces and their subfaces: the pertinent information can be queried using GeometryInfo<dim>::max_children_per_face or face->n_children(), depending on the context.

    +

    Another important aspect, and the most important one in this tutorial, is the treatment of neighbor-relations when assembling jump terms on the faces between cells. Looking at the documentation of the assemble_system functions in step-12 we notice, that we need to decide if a neighboring cell is coarser, finer or on the same (refinement) level as our current cell. These decisions do not work in the same way for anisotropic refinement as the information given by the level of a cell is not enough to completely characterize anisotropic cells; for example, are the terminal children of a two-dimensional cell that is first cut in $x$-direction and whose children are then cut in $y$-direction on level 2, or are they on level 1 as they would be if the cell would have been refined once isotropically, resulting in the same set of finest cells?

    After anisotropic refinement, a coarser neighbor is not necessarily exactly one level below ours, but can pretty much have any level relative to the current one; in fact, it can even be on a higher level even though it is coarser. Thus the decisions have to be made on a different basis, whereas the intention of the decisions stays the same.

    In the following, we will discuss the cases that can happen when we want to compute contributions to the matrix (or right hand side) of the form

\[
  \int_{\partial K} \varphi_i(x) \varphi_j(x) f(x) dx
\]
• Finer neighbor: If we are on an active cell and want to integrate over a face $f\subset \partial K$, the first possibility is that the neighbor behind this face is more refined, i.e. has children occupying only part of the common face. In this case, the face under consideration has to be a refined one, which we can determine by asking if (face->has_children()). If this is true, we need to loop over all subfaces and get the neighbor's child behind this subface, so that we can reinit an FEFaceValues object with the neighbor and an FESubfaceValues object with our cell and the respective subface.

For isotropic refinement, this case is reasonably simple because we know that an invariant of the isotropically refined adaptive meshes in deal.II is that neighbors can only differ by exactly one refinement level. However, this isn't quite true any more for anisotropically refined meshes, in particular in 3d; there, the active cell we are interested in on the other side of $f$ might not actually be a child of our neighbor, but perhaps a grandchild or even a farther offspring. Fortunately, this complexity is hidden in the internals of the library. All we need to do is call the CellAccessor::neighbor_child_on_subface() function. Still, in 3d there are two cases which need special consideration:


• If the neighbor is refined more than once anisotropically, it might be that there are not two or four but actually three subfaces to consider. Imagine the following refinement process of the (two-dimensional) face of the (three-dimensional) neighbor cell we are considering: first the face is refined along $x$, later on only the left subface is refined along $y$.

        [Two ASCII sketches in the original illustrate these situations: the first shows the neighbor's face refined along $x$ with its left subface then refined along $y$, leaving three active subfaces; the second shows a coarse cell marked # next to two finer cells marked +, which is discussed next.]

        Here, the left two cells resulted from an anisotropic bisection of the mother cell in $y$-direction, whereas the right four cells resulted from a simultaneous anisotropic refinement in both the $y$- and $z$-directions. The left cell marked with # has two finer neighbors marked with +, but the actual neighbor of the left cell is the complete right mother cell, as the two cells marked with + are finer and their direct mother is the one large cell.


However, fortunately, CellAccessor::neighbor_child_on_subface() takes care of these situations by itself, if you loop over the correct number of subfaces, which in the above example is two. The FESubfaceValues<dim>::reinit function takes care of this too, so that the resulting state is always correct. There is one little caveat, however: For reiniting the neighbor's FEFaceValues object you need to know the index of the face that points toward the current cell. If the neighbor is as coarse or as fine as the current cell, this information can be obtained with CellAccessor::neighbor_of_neighbor(). If the neighbor is coarser, however, you would have to use the first value of the pair returned by CellAccessor::neighbor_of_coarser_neighbor() instead. In order to make this easy for you, there is CellAccessor::neighbor_face_no(), which does the correct thing in either case and returns the desired result.
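Put together, the finer-neighbor case might be handled along the lines of the following sketch; face_no is the number of the current face, and the FEValues object names fe_v_subface and fe_v_face_neighbor as well as the assembly itself are placeholders:

  if (face->has_children())
    {
      // The face number the neighbor uses to refer back to the current
      // cell; neighbor_face_no() does the right thing for coarser,
      // equally fine, and finer neighbors.
      const unsigned int neighbor2 = cell->neighbor_face_no(face_no);

      for (unsigned int subface_no = 0; subface_no < face->n_children(); ++subface_no)
        {
          // The active cell behind this subface, possibly a grandchild
          // or farther offspring of the direct neighbor.
          const typename DoFHandler<dim>::cell_iterator neighbor_child =
            cell->neighbor_child_on_subface(face_no, subface_no);

          fe_v_subface.reinit(cell, face_no, subface_no);
          fe_v_face_neighbor.reinit(neighbor_child, neighbor2);

          // ... assemble the jump terms from the two sets of values ...
        }
    }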


This approach is similar to the one we have used in step-27 for hp-refinement and has the great advantage of flexibility: Any error indicator can be used in the anisotropic process, i.e. if you have quite involved a posteriori goal-oriented error indicators available, you can use them as easily as a simple Kelly error estimator. The anisotropic part of the refinement process is not influenced by this choice. Furthermore, simply leaving out the third and fourth steps leads to the same isotropic refinement you used to get before any anisotropic changes in deal.II or your application program. As a last advantage, working only on cells flagged for refinement results in a faster evaluation of the anisotropic indicator, which can become noticeable on finer meshes with a lot of cells if the indicator is quite involved.

Here, we use a very simple approach which is only applicable to DG methods. The general idea is quite simple: DG methods allow the discrete solution to jump over the faces of a cell, whereas it is smooth within each cell. Of course, in the limit we expect that the jumps tend to zero as we refine the mesh and approximate the true solution better and better. Thus, a large jump across a given face indicates that the cell should be refined (at least) orthogonally to that face, whereas a small jump does not lead to this conclusion. It is possible, of course, that the exact solution is not smooth and that it also features a jump. In that case, however, a large jump over one face indicates that this face is more or less parallel to the jump and in the vicinity of it, thus again we would expect a refinement orthogonal to the face under consideration to be effective.


    The proposed indicator calculates the average jump $K_j$, i.e. the mean value of the absolute jump $|[u]|$ of the discrete solution $u$ over the two faces $f_i^j$, $i=1,2$, $j=1..d$ orthogonal to coordinate direction $j$ on the unit cell.


    \[
 K_j = \frac{\sum_{i=1}^2 \int_{f_i^j}|[u]| dx}{\sum_{i=1}^2 |f_i^j|} .
 \]


    If the average jump in one direction is larger than the average of the jumps in the other directions by a certain factor $\kappa$, i.e. if $K_i > \kappa \frac 1{d-1} \sum_{j=1, j\neq i}^d K_j$, the cell is refined only along that particular direction $i$, otherwise the cell is refined isotropically.

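In code, the decision at the end of the indicator might look like the following sketch; the array average_jumps (holding the $K_j$ of the current cell) and the threshold kappa are assumed to have been computed and chosen beforehand, and the cell is assumed to carry an isotropic refinement flag already:

  double sum_of_average_jumps = 0.;
  for (unsigned int j = 0; j < dim; ++j)
    sum_of_average_jumps += average_jumps[j];

  for (unsigned int i = 0; i < dim; ++i)
    // If K_i exceeds kappa times the mean of the jumps in the other
    // directions, replace the isotropic flag by a refinement along
    // direction i only.
    if (average_jumps[i] > kappa / (dim - 1) *
                             (sum_of_average_jumps - average_jumps[i]))
      cell->set_refine_flag(RefinementCase<dim>::cut_axis(i));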

    Such a criterion is easily generalized to systems of equations: the absolute value of the jump would be replaced by an appropriate norm of the vector-valued jump.

    The problem

    We solve the linear transport equation presented in step-12. The domain is extended to cover $[-1,1]\times[0,1]$ in 2D, where the flow field $\beta$ describes a counterclockwise quarter circle around the origin in the right half of the domain and is parallel to the x-axis in the left part of the domain. The inflow boundary is again located at $x=1$ and along the positive part of the x-axis, and the boundary conditions are chosen as in step-12.

The flow field is chosen to be a quarter circle with counterclockwise flow direction and with the origin as midpoint for the right half of the domain with positive $x$ values, whereas the flow simply goes to the left in the left part of the domain at a velocity that matches the one coming in from the right. In the circular part the magnitude of the flow velocity is proportional to the distance from the origin. This is a difference to step-12, where the magnitude was 1 everywhere. The new definition leads to a linear variation of $\beta$ along each given face of a cell. On the other hand, the solution $u(x,y)$ is exactly the same as before.


  void value_list(const std::vector<Point<dim>> &points,
                  std::vector<Point<dim>> &values) const
  {
    AssertDimension(values.size(), points.size());
    // ... fill values[i] with the flow field described above ...
  }

We see that the solution on the anisotropically refined mesh is very similar to the solution obtained on the isotropically refined mesh. Thus the anisotropic indicator seems to effectively select the appropriate cells for anisotropic refinement.

The pictures also explain why the mesh is refined as it is. In the whole left part of the domain refinement is only performed along the $y$-axis of cells. In the right part of the domain the refinement is dominated by isotropic refinement, as the anisotropic feature of the solution - the jump from one to zero - is not well aligned with the mesh where the advection direction takes a turn. However, at the bottom and closest (to the observer) parts of the quarter circle this jump again becomes more and more aligned with the mesh and the refinement algorithm reacts by creating anisotropic cells of increasing aspect ratio.


It might seem that the necessary alignment of anisotropic features and the coarse mesh can decrease performance significantly for real world problems. That is not wrong in general: If one were, for example, to apply anisotropic refinement to problems in which shocks appear (e.g., the equations solved in step-69), then in many cases the shock is not aligned with the mesh and anisotropic refinement will help little unless one also introduces techniques to move the mesh in alignment with the shocks. On the other hand, many steep features of solutions are due to boundary layers. In those cases, the mesh is already aligned with the anisotropic features because it is of course aligned with the boundary itself, and anisotropic refinement will almost always increase the efficiency of computations on adapted grids for these cases.

    The plain program

    /* ------------------------------------------------------------------------
/usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html 2024-12-27 18:25:19.624948035 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html 2024-12-27 18:25:19.628948062 +0000

\begin{eqnarray*}
  -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=& -\rho \; \beta \; T \; \mathbf{g},
  \\
  \nabla \cdot {\mathbf u} &=& 0,
  \\
  \frac{\partial T}{\partial t} + {\mathbf u} \cdot \nabla T - \nabla \cdot \kappa \nabla T &=& \gamma.
\end{eqnarray*}

These equations fall into the class of vector-valued problems (a toplevel overview of this topic can be found in the Handling vector valued problems topic). Here, $\mathbf u$ is the velocity field, $p$ the pressure, and $T$ the temperature of the fluid. $\varepsilon ({\mathbf u}) = \frac 12 [(\nabla{\mathbf u}) + (\nabla {\mathbf u})^T]$ is the symmetric gradient of the velocity. As can be seen, velocity and pressure solve a Stokes equation describing the motion of an incompressible fluid, an equation we have previously considered in step-22; we will draw extensively on the experience we have gained in that program, in particular with regard to efficient linear Stokes solvers.

The forcing term of the fluid motion is the buoyancy of the fluid, expressed as the product of the density $\rho$, the thermal expansion coefficient $\beta$, the temperature $T$ and the gravity vector $\mathbf{g}$ pointing downward. (A derivation of why the right hand side looks the way it does is given in the introduction of step-32.) While the first two equations describe how the fluid reacts to temperature differences by moving around, the third equation states how the fluid motion affects the temperature field: it is an advection diffusion equation, i.e., the temperature is attached to the fluid particles and advected along in the flow field, with an additional diffusion (heat conduction) term. In many applications, the diffusion coefficient is fairly small, and the temperature equation is in fact transport, not diffusion dominated and therefore in character more hyperbolic than elliptic; we will have to take this into account when developing a stable discretization.


    In the equations above, the term $\gamma$ on the right hand side denotes the heat sources and may be a spatially and temporally varying function. $\eta$ and $\kappa$ denote the viscosity and diffusivity coefficients, which we assume constant for this tutorial program. The more general case when $\eta$ depends on the temperature is an important factor in physical applications: Most materials become more fluid as they get hotter (i.e., $\eta$ decreases with $T$); sometimes, as in the case of rock minerals at temperatures close to their melting point, $\eta$ may change by orders of magnitude over the typical range of temperatures.

We note that the Stokes equation above could be nondimensionalized by introducing the Rayleigh number $\mathrm{Ra}=\frac{\|\mathbf{g}\| \beta \rho}{\eta \kappa} \delta T L^3$ using a typical length scale $L$, typical temperature difference $\delta T$, density $\rho$, viscosity $\eta$, and thermal diffusivity $\kappa$. $\mathrm{Ra}$ is a dimensionless number that describes the ratio of heat transport due to convection induced by buoyancy changes from temperature differences, and of heat transport due to thermal diffusion. A small Rayleigh number implies that buoyancy is not strong relative to viscosity and fluid motion $\mathbf{u}$ is slow enough so that heat diffusion $\kappa\nabla T$ is the dominant heat transport term. On the other hand, a fluid with a high Rayleigh number will show vigorous convection that dominates heat conduction.


For most fluids for which we are interested in computing thermal convection, the Rayleigh number is very large, often $10^6$ or larger. From the structure of the equations, we see that this will lead to large pressure differences and large velocities. Consequently, the convection term in the convection-diffusion equation for $T$ will also be very large and an accurate solution of this equation will require us to choose small time steps. Problems with large Rayleigh numbers are therefore hard to solve numerically, for reasons similar to those that make the Navier-Stokes equations hard to solve when the Reynolds number $\mathrm{Re}$ is large.

Note that a large Rayleigh number does not necessarily involve large velocities in absolute terms. For example, the Rayleigh number in the earth mantle is larger than $10^6$. Yet the velocities are small: the material is in fact solid rock but it is so hot and under pressure that it can flow very slowly, on the order of at most a few centimeters per year. Nevertheless, this can lead to mixing over time scales of many million years, a time scale much shorter than the one needed to distribute the same amount of heat by thermal conductivity, and a time scale relevant to the evolution of the earth's interior and surface structure.

    Note
    If you are interested in using the program as the basis for your own experiments, you will also want to take a look at its continuation in step-32. Furthermore, step-32 later was developed into the much larger open source code ASPECT (see https://aspect.geodynamics.org/ ) that can solve realistic problems and that you may want to investigate before trying to morph step-31 into something that can solve whatever you want to solve.

Since the Boussinesq equations are derived under the assumption that inertia of the fluid's motion does not play a role, the flow field is at each time entirely determined by buoyancy differences at that time, not by the flow field at previous times. This is reflected by the fact that the first two equations above are the steady state Stokes equations, which do not contain a time derivative. Consequently, we do not need initial conditions for either velocities or pressure. On the other hand, the temperature field does satisfy an equation with a time derivative, so we need initial conditions for $T$.

As for boundary conditions: if $\kappa>0$ then the temperature satisfies a second order differential equation that requires boundary data all around the boundary for all times. These can either be a prescribed boundary temperature $T|_{\partial\Omega}=T_b$ (Dirichlet boundary conditions), or a prescribed thermal flux $\mathbf{n}\cdot\kappa\nabla T|_{\partial\Omega}=\phi$; in this program, we will use an insulated boundary condition, i.e., prescribe no thermal flux: $\phi=0$.

Similarly, the velocity field requires us to pose boundary conditions. These may be no-slip no-flux conditions $\mathbf{u}=0$ on $\partial\Omega$ if the fluid sticks to the boundary, or no normal flux conditions $\mathbf n \cdot \mathbf u = 0$ if the fluid can flow along but not across the boundary, or any number of other conditions that are physically reasonable. In this program, we will use no normal flux conditions.

    Solution approach


Like the equations solved in step-21, we here have a system of differential-algebraic equations (DAE): with respect to the time variable, only the temperature equation is a differential equation, whereas the Stokes system for $\mathbf{u}$ and $p$ has no time-derivatives and therefore acts as an algebraic constraint that has to hold at each time instant. The main difference to step-21 is that the algebraic constraint there was a mixed Laplace system of the form


    \begin{eqnarray*}
   \mathbf u + {\mathbf K}\lambda \nabla p &=& 0, \\
   \nabla\cdot \mathbf u &=& f,
\end{eqnarray*}

whereas here the constraint is a Stokes system, which we will solve first in each time step using the previously computed temperature,

\begin{eqnarray*}
  -\nabla \cdot (2\eta \varepsilon ({\mathbf u}^{n-1})) + \nabla p^{n-1} &=& -\rho \; \beta \; T^{n-1} \mathbf{g},
  \\
  \nabla \cdot {\mathbf u}^{n-1} &=& 0,
\end{eqnarray*}


    and then the temperature equation with an extrapolated velocity field to time $n$.


    In contrast to step-21, we'll use a higher order time stepping scheme here, namely the Backward Differentiation Formula scheme of order 2 (BDF-2 in short) that replaces the time derivative $\frac{\partial T}{\partial t}$ by the (one-sided) difference quotient $\frac{\frac 32 T^{n}-2T^{n-1}+\frac 12 T^{n-2}}{k}$ with $k$ the time step size. This gives the discretized-in-time temperature equation

\begin{eqnarray*}
  \frac 32 T^n
  -
  k\nabla \cdot \kappa \nabla T^n
  &=&
  2 T^{n-1}
  -
  \frac 12 T^{n-2}
  -
  k \left(2{\mathbf u}^{n-1} - {\mathbf u}^{n-2}\right) \cdot \nabla \left(2T^{n-1} - T^{n-2}\right)
  +
  k\gamma.
\end{eqnarray*}

Note how the temperature equation is solved semi-explicitly: diffusion is treated implicitly whereas advection is treated explicitly using an extrapolation (or forward-projection) of temperature and velocity, including the just-computed velocity ${\mathbf u}^{n-1}$. The forward-projection to the current time level $n$ is derived from a Taylor expansion, $T^n \approx T^{n-1} + k_n \frac{\partial T}{\partial t} \approx T^{n-1} + k_n \frac{T^{n-1}-T^{n-2}}{k_n} = 2T^{n-1}-T^{n-2}$. We need this projection for maintaining the order of accuracy of the BDF-2 scheme. In other words, the temperature fields we use in the explicit right hand side are second order approximations of the current temperature field — not quite an explicit time stepping scheme, but by character not too far away either.

The introduction of the temperature extrapolation limits the time step by a Courant-Friedrichs-Lewy (CFL) condition, just as in step-21. (We wouldn't have had that stability condition if we treated the advection term implicitly since the BDF-2 scheme is A-stable, at the price that we needed to build a new temperature matrix at each time step.) We will discuss the exact choice of time step in the results section, but for the moment what is important is that this CFL condition means that the time step size $k$ may change from time step to time step, and that we have to modify the above formula slightly. If $k_n,k_{n-1}$ are the time step sizes of the current and previous time step, then we use the approximations

\begin{eqnarray*}
  \frac{2k_n+k_{n-1}}{k_n+k_{n-1}} T^n
  -
  k_n\nabla \cdot \kappa \nabla T^n
  &=&
  \frac{k_n+k_{n-1}}{k_{n-1}} T^{n-1}
  -
  \frac{k_n^2}{k_{n-1}(k_n+k_{n-1})} T^{n-2}
  -
  k_n {\mathbf u}^{*,n} \cdot \nabla T^{*,n}
  +
  k_n\gamma,
\end{eqnarray*}

where ${(\cdot)}^{*,n} = \left(1+\frac{k_n}{k_{n-1}}\right)(\cdot)^{n-1} - \frac{k_n}{k_{n-1}}(\cdot)^{n-2}$ denotes the extrapolation of velocity $\mathbf u$ and temperature $T$ to time level $n$, using the values at the two previous time steps. That's not an easy to read equation, but will provide us with the desired higher order accuracy. As a consistency check, it is easy to verify that it reduces to the same equation as above if $k_n=k_{n-1}$.
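Spelled out, the consistency check mentioned in the last sentence is a one-liner:

\[
  \left.{(\cdot)}^{*,n}\right|_{k_n=k_{n-1}}
  = (1+1)\,(\cdot)^{n-1} - 1\cdot(\cdot)^{n-2}
  = 2\,(\cdot)^{n-1}-(\cdot)^{n-2},
\]

which is exactly the forward-projection derived from the Taylor expansion above.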

    As a final remark we note that the choice of a higher order time stepping scheme of course forces us to keep more time steps in memory; in particular, we here will need to have $T^{n-2}$ around, a vector that we could previously discard. This seems like a nuisance that we were able to avoid previously by using only a first order time stepping scheme, but as we will see below when discussing the topic of stabilization, we will need this vector anyway and so keeping it around for time discretization is essentially for free and gives us the opportunity to use a higher order scheme.

    Weak form and space discretization for the Stokes part

    Like solving the mixed Laplace equations, solving the Stokes equations requires us to choose particular pairs of finite elements for velocities and pressure variables. Because this has already been discussed in step-22, we only cover this topic briefly: Here, we use the stable pair $Q_{p+1}^d \times Q_p, p\ge 1$. These are continuous elements, so we can form the weak form of the Stokes equation without problem by integrating by parts and substituting continuous functions by their discrete counterparts:

\begin{eqnarray*}
  (\varepsilon({\mathbf v}_h), 2\eta \varepsilon({\mathbf u}_h))
  -
  (\nabla \cdot {\mathbf v}_h, p_h)
  &=&
  -({\mathbf v}_h, \rho \; \beta \; T \; \mathbf{g}),
  \\
  (q_h, \nabla \cdot {\mathbf u}_h)
  &=&
  0,
\end{eqnarray*}

for all discrete test functions ${\mathbf v}_h, q_h$. The stabilization of the temperature equation follows the scheme of Guermond and Popov discussed below and is based on the residual

\begin{eqnarray*}
  R_\alpha(T)
  =
  \left(
  \frac{\partial T}{\partial t}
  +
  {\mathbf u} \cdot \nabla T
  -
  \nabla \cdot \kappa \nabla T
  -
  \gamma
  \right)
  T^{\alpha-1}
\end{eqnarray*}


    where we will later choose the stabilization exponent $\alpha$ from within the range $[1,2]$. Note that $R_\alpha(T)$ will be zero if $T$ satisfies the temperature equation, since then the term in parentheses will be zero. Multiplying terms out, we get the following, entirely equivalent form:


\begin{eqnarray*}
  R_\alpha(T)
  &=&
  \frac 1\alpha \frac{\partial T^\alpha}{\partial t}
  +
  \frac 1\alpha {\mathbf u} \cdot \nabla T^\alpha
  -
  \frac 1\alpha \nabla \cdot \kappa \nabla T^\alpha
  +
  \kappa\left(\alpha-1\right) T^{\alpha-2} |\nabla T|^2
  -
  \gamma T^{\alpha-1} .
\end{eqnarray*}

Based on this residual, the artificial viscosity is chosen cell-wise as

\begin{eqnarray*}
  \nu_\alpha(T)|_K
  &=&
  \beta
  \|\mathbf{u}\|_{L^\infty(K)}
  \min\left\{
    h_K,
    h_K^\alpha \frac{\|R_\alpha(T)\|_{L^\infty(K)}}{c(\mathbf{u},T)}
  \right\},
  \\
  c(\mathbf{u},T)
  &=&
  c_R\ \|\mathbf{u}\|_{L^\infty(\Omega)} \ \mathrm{var}(T)
  \ |\mathrm{diam}(\Omega)|^{\alpha-2},
\end{eqnarray*}

where $\mathrm{var}(T)=\max_\Omega T - \min_\Omega T$ is the range of present temperature values (remember that buoyancy is driven by temperature variations, not the absolute temperature) and $c_R$ is a dimensionless constant. To understand why this method works consider this: If on a particular cell $K$ the temperature field is smooth, then we expect the residual to be small there (in fact to be on the order of ${\cal O}(h_K)$) and the stabilization term that injects artificial diffusion will there be of size $h_K^{\alpha+1}$ — i.e., rather small, just as we hope it to be when no additional diffusion is necessary. On the other hand, if we are on or close to a discontinuity of the temperature field, then the residual will be large; the minimum operation in the definition of $\nu_\alpha(T)$ will then ensure that the stabilization has size $h_K$ — the optimal amount of artificial viscosity to ensure stability of the scheme.

Whether or not this scheme really works is a good question. Computations by Guermond and Popov have shown that this form of stabilization actually performs much better than most of the other stabilization schemes that are around (for example streamline diffusion, to name only the simplest one). Furthermore, for $\alpha\in [1,2)$ they can even prove that it produces better convergence orders for the linear transport equation than for example streamline diffusion. For $\alpha=2$, no theoretical results are currently available, but numerical tests indicate that the results are considerably better than for $\alpha=1$.

    A more practical question is how to introduce this artificial diffusion into the equations we would like to solve. Note that the numerical viscosity $\nu(T)$ is temperature-dependent, so the equation we want to solve is nonlinear in $T$ — not what one desires from a simple method to stabilize an equation, and even less so if we realize that $\nu(T)$ is nondifferentiable in $T$. However, there is no reason to despair: we still have to discretize in time and we can treat the term explicitly.

In the definition of the stabilization parameter, we approximate the time derivative by $\frac{\partial T}{\partial t} \approx \frac{T^{n-1}-T^{n-2}}{k^{n-1}}$. This approximation makes use only of available time data, and this is the reason why we need to store data of two previous time steps (which enabled us to use the BDF-2 scheme without additional storage cost). We could now simply evaluate the rest of the terms at $t_{n-1}$, but then the discrete residual would be nothing else than a backward Euler approximation, which is only first order accurate. So, in case of smooth solutions, the residual would be still of the order $h$, despite the second order time accuracy in the outer BDF-2 scheme and the spatial FE discretization. This is certainly not what we want to have (in fact, we desired to have small residuals in regions where the solution behaves nicely), so a bit more care is needed. The key to this problem is to observe that the first derivative as we constructed it is actually centered at $t_{n-\frac{3}{2}}$. We get the desired second order accurate residual calculation if we evaluate all spatial terms at $t_{n-\frac{3}{2}}$ by using the approximation $\frac 12 T^{n-1}+\frac 12 T^{n-2}$, which means that we calculate the nonlinear viscosity as a function of this intermediate temperature, $\nu_\alpha = \nu_\alpha\left(\frac 12 T^{n-1}+\frac 12 T^{n-2}\right)$. Note that this evaluation of the residual is nothing else than a Crank-Nicolson scheme, so we can be sure that now everything is alright. One might wonder whether it is a problem that the numerical viscosity now is not evaluated at time $n$ (as opposed to the rest of the equation). However, this offset is uncritical: For smooth solutions, $\nu_\alpha$ will vary continuously, so the error in time offset is $k$ times smaller than the nonlinear viscosity itself, i.e., it is a small higher order contribution that is left out. That's fine because the term itself is already at the level of discretization error in smooth regions.

    Using the BDF-2 scheme introduced above, this yields for the simpler case of uniform time steps of size $k$:

\begin{eqnarray*}
  \frac 32 T^n
  -
  k\nabla \cdot \kappa \nabla T^n
  &=&
  2 T^{n-1}
  -
  \frac 12 T^{n-2}
  -
  k \left(2{\mathbf u}^{n-1} - {\mathbf u}^{n-2}\right) \cdot \nabla \left(2T^{n-1} - T^{n-2}\right)
  +
  k\nabla \cdot \nu_\alpha\left(\frac 12 T^{n-1}+\frac 12 T^{n-2}\right) \nabla \left(2T^{n-1} - T^{n-2}\right)
  +
  k\gamma.
\end{eqnarray*}

For the Stokes part, a good preconditioner is the block triangular matrix

\begin{eqnarray*}
  P
  =
  \left(\begin{array}{cc}
    A & 0 \\ B & -S
  \end{array}\right)
\end{eqnarray*}

    where $S$ is the Schur complement of the Stokes operator $S=B^TA^{-1}B$. Of course, this preconditioner is not useful because we can't form the various inverses of matrices, but we can use the following as a preconditioner:


\begin{eqnarray*}
  \tilde P^{-1}
  =
  \left(\begin{array}{cc}
    \tilde A^{-1} & 0 \\ \tilde S^{-1} B \tilde A^{-1} & -\tilde S^{-1}
  \end{array}\right)
\end{eqnarray*}

where $\tilde A^{-1},\tilde S^{-1}$ are approximations to the inverse matrices. In particular, it turned out that $S$ is spectrally equivalent to the mass matrix and consequently replacing $\tilde S^{-1}$ by a CG solver applied to the mass matrix on the pressure space was a good choice. In a small deviation from step-22, we here have a coefficient $\eta$ in the momentum equation, and by the same derivation as there we should arrive at the conclusion that it is the weighted mass matrix with entries $\tilde S_{ij}=(\eta^{-1}\varphi_i,\varphi_j)$ that we should be using.
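The application of $\tilde S^{-1}$ then amounts to an inner CG solve with the weighted pressure mass matrix; a minimal sketch under these assumptions (pressure_mass_matrix and ic_preconditioner are illustrative names, and the tolerance is one plausible choice):

  // dst = (pressure mass matrix)^{-1} src, computed iteratively; an IC
  // preconditioner keeps the inner solve to a few iterations.
  SolverControl solver_control(5000, 1e-6 * src.l2_norm());
  SolverCG<TrilinosWrappers::MPI::Vector> cg(solver_control);
  dst = 0;
  cg.solve(pressure_mass_matrix, dst, src, ic_preconditioner);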

It was more complicated to come up with a good replacement $\tilde A^{-1}$, which corresponds to the discretized symmetric Laplacian of the vector-valued velocity field, i.e. $A_{ij} = (\varepsilon ({\mathbf v}_i), 2\eta \varepsilon ({\mathbf v}_j))$. In step-22 we used a sparse LU decomposition (using the SparseDirectUMFPACK class) of $A$ for $\tilde A^{-1}$ — the perfect preconditioner — in 2d, but for 3d memory and compute time are usually not sufficient to actually compute this decomposition; consequently, we only use an incomplete LU decomposition (ILU, using the SparseILU class) in 3d.

For this program, we would like to go a bit further. To this end, note that the symmetrized bilinear form on vector fields, $(\varepsilon ({\mathbf v}_i), 2 \eta \varepsilon ({\mathbf v}_j))$, is not too far away from the nonsymmetrized version, $(\nabla {\mathbf v}_i, \eta \nabla {\mathbf v}_j) = \sum_{k,l=1}^d (\partial_k ({\mathbf v}_i)_l, \eta \partial_k ({\mathbf v}_j)_l)$ (note that the factor 2 has disappeared in this form). The latter, however, has the advantage that the dim vector components of the test functions are not coupled (well, almost, see below), i.e., the resulting matrix is block-diagonal: one block for each vector component, and each of these blocks is equal to the Laplace matrix for this vector component. So assuming we order degrees of freedom in such a way that first all $x$-components of the velocity are numbered, then the $y$-components, and then the $z$-components, then the matrix $\hat A$ that is associated with this slightly different bilinear form has the form

\begin{eqnarray*}
  \hat A =
  \left(\begin{array}{ccc}
    A^s & 0 & 0 \\
    0 & A^s & 0 \\
    0 & 0 & A^s
  \end{array}\right)
\end{eqnarray*}

(shown here for 3d), where $A^s$ denotes the Laplace matrix acting on a single scalar component of the velocity.


The problem with this is: We never use the whole matrix at the same time. In fact, it never really exists at the same time: As explained above, $K$ and $F_T$ depend on the already computed solution $U^n$, in the first case through the time step (that depends on $U^n$ because it has to satisfy a CFL condition). So we can only assemble it once we've already solved the top left $2\times 2$ block Stokes system, and once we've moved on to the temperature equation we don't need the Stokes part any more; the fact that we build an object for a matrix that never exists as a whole in memory at any given time led us to jump through some hoops in step-21, so let's not repeat this sort of error. Furthermore, we don't actually build the matrix $C$: Because by the time we get to the temperature equation we already know $U^n$, and because we have to assemble the right hand side $F_T$ at this time anyway, we simply move the term $CU^n$ to the right hand side and assemble it along with all the other terms there. What this means is that there does not remain a part of the matrix where temperature variables and Stokes variables couple, and so a global enumeration of all degrees of freedom is no longer important: It is enough if we have an enumeration of all Stokes degrees of freedom, and of all temperature degrees of freedom independently.


    In essence, there is consequently not much use in putting everything into a block matrix (though there are of course the same good reasons to do so for the $2\times 2$ Stokes part), or, for that matter, in putting everything into the same DoFHandler object.


But are there downsides to doing so? These exist, though they may not be obvious at first. The main problem is that we would need to create one global finite element that contains velocity, pressure, and temperature shape functions, and use this to initialize the DoFHandler. But we also use this finite element object to initialize all FEValues or FEFaceValues objects that we use. This may not appear to be that big a deal, but imagine what happens when, for example, we evaluate the residual $R_\alpha(T)$ defined above.
which indeed is very simple. A GMRES solver based on exact matrices would converge in one iteration, since all eigenvalues are equal (any Krylov method takes at most as many iterations as there are distinct eigenvalues). Such a preconditioner for the blocked Stokes system has been proposed by Silvester and Wathen.

    Replacing $P$ by $\tilde{P}$ keeps that spirit alive: the product $P^{-1} A$ will still be close to a matrix with eigenvalues 1 with a distribution that does not depend on the problem size. This lets us hope to be able to get a number of GMRES iterations that is problem-size independent.


    The deal.II users who have already gone through the step-20 and step-22 tutorials can certainly imagine how we're going to implement this. We replace the exact inverse matrices in $P^{-1}$ by some approximate inverses built from the InverseMatrix class, and the inverse Schur complement will be approximated by the pressure mass matrix $M_p$ (weighted by $\eta^{-1}$ as mentioned in the introduction). As pointed out in the results section of step-22, we can replace the exact inverse of $A$ by just the application of a preconditioner, in this case on a vector Laplace matrix as was explained in the introduction. This does increase the number of (outer) GMRES iterations, but is still significantly cheaper than an exact inverse, which would require between 20 and 35 CG iterations for each outer solver step (using the AMG preconditioner).


    Having the above explanations in mind, we define a preconditioner class with a vmult functionality, which is all we need for the interaction with the usual solver functions further below in the program code.

    First the declarations. These are similar to the definition of the Schur complement in step-20, with the difference that we need some more preconditioners in the constructor and that the matrices we use here are built upon Trilinos:

      template <class PreconditionerTypeA, class PreconditionerTypeMp>
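  // (Sketch of the elided class declaration, following the description
  // above; the member names are assumptions that are reused in the
  // vmult function further below.)
  class BlockSchurPreconditioner : public Subscriptor
  {
  public:
    BlockSchurPreconditioner(
      const TrilinosWrappers::BlockSparseMatrix &S,
      const InverseMatrix<TrilinosWrappers::SparseMatrix,
                          PreconditionerTypeMp> &Mpinv,
      const PreconditionerTypeA                 &Apreconditioner);

    void vmult(TrilinosWrappers::MPI::BlockVector       &dst,
               const TrilinosWrappers::MPI::BlockVector &src) const;

  private:
    const SmartPointer<const TrilinosWrappers::BlockSparseMatrix> stokes_matrix;
    const SmartPointer<const InverseMatrix<TrilinosWrappers::SparseMatrix,
                                           PreconditionerTypeMp>> m_inverse;
    const PreconditionerTypeA &a_preconditioner;
    mutable TrilinosWrappers::MPI::Vector tmp;
  };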

    Next is the vmult function. We implement the action of $P^{-1}$ as described above in three successive steps. In formulas, we want to compute $Y=P^{-1}X$ where $X,Y$ are both vectors with two block components.


The first step multiplies the velocity part of the vector by a preconditioner of the matrix $A$, i.e., we compute $Y_0={\tilde A}^{-1}X_0$. The resulting velocity vector is then multiplied by $B$ and subtracted from the pressure, i.e., we want to compute $X_1-BY_0$. This second step only acts on the pressure vector and is accomplished by the residual function of our matrix classes, except that the sign is wrong. Consequently, we change the sign in the temporary pressure vector and finally multiply by the inverse pressure mass matrix to get the final pressure vector, completing our work on the Stokes preconditioner:

  template <class PreconditionerTypeA, class PreconditionerTypeMp>
  void
  BlockSchurPreconditioner<PreconditionerTypeA, PreconditionerTypeMp>::vmult(
    TrilinosWrappers::MPI::BlockVector       &dst,
    const TrilinosWrappers::MPI::BlockVector &src) const
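    // (Body reconstructed from the three-step description above; the
    // member names follow the declaration sketch and are assumptions.)
    {
      // Step 1: Y_0 = A~^{-1} X_0, the preconditioner applied to the
      // velocity block.
      a_preconditioner.vmult(dst.block(0), src.block(0));

      // Step 2: residual() computes tmp = X_1 - B Y_0; we need its
      // negative, hence the sign change.
      stokes_matrix->block(1, 0).residual(tmp, dst.block(0), src.block(1));
      tmp *= -1;

      // Step 3: multiply by the approximate inverse pressure mass matrix.
      m_inverse->vmult(dst.block(1), tmp);
    }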

    BoussinesqFlowProblem class implementation

    BoussinesqFlowProblem::BoussinesqFlowProblem


    The constructor of this class is an extension of the constructor in step-22. We need to add the various variables that concern the temperature. As discussed in the introduction, we are going to use $Q_2^d\times Q_1$ (Taylor-Hood) elements again for the Stokes part, and $Q_2$ elements for the temperature. However, by using variables that store the polynomial degree of the Stokes and temperature finite elements, it is easy to consistently modify the degree of the elements as well as all quadrature formulas used on them downstream. Moreover, we initialize the time stepping as well as the options for matrix assembly and preconditioning:


      template <int dim>
      BoussinesqFlowProblem<dim>::BoussinesqFlowProblem()
      : triangulation(Triangulation<dim>::maximum_smoothing)
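The Taylor-Hood element mentioned above would be built as an FESystem; a minimal sketch using the stored polynomial degrees (stokes_degree, temperature_degree and stokes_fe are assumed names consistent with the description):

  // dim copies of FE_Q(stokes_degree + 1) for the velocities, one
  // FE_Q(stokes_degree) for the pressure: the stable Q_{p+1}^d x Q_p pair.
  const FESystem<dim> stokes_fe(FE_Q<dim>(stokes_degree + 1), dim,
                                FE_Q<dim>(stokes_degree), 1);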

    BoussinesqFlowProblem::get_extrapolated_temperature_range


    Next a function that determines the minimum and maximum temperature at quadrature points inside $\Omega$ when extrapolated from the two previous time steps to the current one. We need this information in the computation of the artificial viscosity parameter $\nu$ as discussed in the introduction.


The formula for the extrapolated temperature is $\left(1+\frac{k_n}{k_{n-1}} \right)T^{n-1} - \frac{k_n}{k_{n-1}} T^{n-2}$. The way to compute it is to loop over all quadrature points and update the maximum and minimum value if the current value is bigger/smaller than the previous one. We initialize the variables that store the max and min before the loop over all quadrature points by the smallest and the largest number representable as a double. Then we know for a fact that it is larger/smaller than the minimum/maximum and that the loop over all quadrature points is ultimately going to update the initial value with the correct one.

    The only other complication worth mentioning here is that in the first time step, $T^{k-2}$ is not yet available of course. In that case, we can only use $T^{k-1}$ which we have from the initial temperature. As quadrature points, we use the same choice as in the previous function though with the difference that now the number of repetitions is determined by the polynomial degree of the temperature field.
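A sketch of the central loop, assuming old_temperature_values and old_old_temperature_values hold $T^{n-1}$ and $T^{n-2}$ at the quadrature points of one cell (n_q_points, time_step and old_time_step are assumed names):

  double min_temperature = std::numeric_limits<double>::max(),
         max_temperature = std::numeric_limits<double>::lowest();

  for (unsigned int q = 0; q < n_q_points; ++q)
    {
      // Extrapolate to the current time level as derived above.
      const double temperature =
        (1. + time_step / old_time_step) * old_temperature_values[q] -
        time_step / old_time_step * old_old_temperature_values[q];

      min_temperature = std::min(min_temperature, temperature);
      max_temperature = std::max(max_temperature, temperature);
    }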


    BoussinesqFlowProblem::compute_viscosity

The last of the tool functions computes the artificial viscosity parameter $\nu|_K$ on a cell $K$ as a function of the extrapolated temperature, its gradient and Hessian (second derivatives), the velocity, the right hand side $\gamma$, all on the quadrature points of the current cell, and various other parameters as described in detail in the introduction.

There are some universal constants worth mentioning here. First, we need to fix $\beta$; we choose $\beta=0.017\cdot dim$, a choice discussed in detail in the results section of this tutorial program. The second is the exponent $\alpha$; $\alpha=1$ appears to work fine for the current program, even though some additional benefit might be expected from choosing $\alpha = 2$. Finally, there is one thing that requires special casing: In the first time step, the velocity equals zero, and the formula for $\nu|_K$ is not defined. In that case, we return $\nu|_K=5\cdot 10^{-3} \cdot h_K$, a choice admittedly more motivated by heuristics than anything else (it is in the same order of magnitude, however, as the value returned for most cells on the second time step).
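The end of the function might then look like the following sketch; the variable names (max_velocity, max_residual, global_u_infty, global_T_variation, global_Omega_diameter, cell_diameter) are assumptions matching the quantities from the introduction, with beta, alpha and c_R the constants just discussed:

  if (timestep_number == 0)
    // First time step: the velocity is zero and the formula below is
    // undefined, so fall back to the heuristic value discussed above.
    return 5e-3 * cell_diameter;
  else
    {
      const double global_scaling = c_R * global_u_infty * global_T_variation *
                                    std::pow(global_Omega_diameter, alpha - 2.);

      // nu|_K = beta * ||u||_inf(K) * min(h_K, h_K^alpha * ||R||_inf(K) / c)
      return beta * max_velocity *
             std::min(cell_diameter,
                      std::pow(cell_diameter, alpha) * max_residual /
                        global_scaling);
    }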

    The rest of the function should be mostly obvious based on the material discussed in the introduction:

      template <int dim>

    BoussinesqFlowProblem::setup_dofs

This is the function that sets up the DoFHandler objects we have here (one for the Stokes part and one for the temperature part) as well as setting the various objects required for the linear algebra in this program to their correct sizes. Its basic operations are similar to what we do in step-22.


    The body of the function first enumerates all degrees of freedom for the Stokes and temperature systems. For the Stokes part, degrees of freedom are then sorted to ensure that velocities precede pressure DoFs so that we can partition the Stokes matrix into a $2\times 2$ matrix. As a difference to step-22, we do not perform any additional DoF renumbering. In that program, it paid off since our solver was heavily dependent on ILU's, whereas we use AMG here which is not sensitive to the DoF numbering. The IC preconditioner for the inversion of the pressure mass matrix would of course take advantage of a Cuthill-McKee like renumbering, but its costs are low compared to the velocity portion, so the additional work does not pay off.


We then proceed with the generation of the hanging node constraints that arise from adaptive grid refinement for both DoFHandler objects. For the velocity, we impose no-flux boundary conditions $\mathbf{u}\cdot \mathbf{n}=0$ by adding constraints to the object that already stores the hanging node constraints matrix. The second parameter in the function describes the first of the velocity components in the total dof vector, which is zero here. The variable no_normal_flux_boundaries denotes the boundary indicators for which to set the no flux boundary conditions; here, this is boundary indicator zero.
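A sketch of this constraints setup, again with illustrative object names (VectorTools::compute_no_normal_flux_constraints is the deal.II function that implements the no-flux condition):

  stokes_constraints.clear();
  DoFTools::make_hanging_node_constraints(stokes_dof_handler, stokes_constraints);

  const std::set<types::boundary_id> no_normal_flux_boundaries = {0};
  VectorTools::compute_no_normal_flux_constraints(stokes_dof_handler,
                                                  0, // first velocity component
                                                  no_normal_flux_boundaries,
                                                  stokes_constraints);
  stokes_constraints.close();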

    After having done so, we count the number of degrees of freedom in the various blocks:

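Sketched with the same illustrative names as above, and using the block assignment vector from the sketch earlier, this amounts to:

  const std::vector<types::global_dof_index> stokes_dofs_per_block =
    DoFTools::count_dofs_per_fe_block(stokes_dof_handler, stokes_sub_blocks);

  const types::global_dof_index n_u = stokes_dofs_per_block[0],
                                n_p = stokes_dofs_per_block[1],
                                n_T = temperature_dof_handler.n_dofs();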

    The next step is to create the sparsity pattern for the Stokes and temperature system matrices as well as the preconditioner matrix from which we build the Stokes preconditioner. As in step-22, we choose to create the pattern by using the blocked version of DynamicSparsityPattern.

    So, we first release the memory stored in the matrices, then set up an object of type BlockDynamicSparsityPattern consisting of $2\times 2$ blocks (for the Stokes system matrix and preconditioner) or DynamicSparsityPattern (for the temperature part). We then fill these objects with the nonzero pattern, taking into account that for the Stokes system matrix, there are no entries in the pressure-pressure block (but all velocity vector components couple with each other and with the pressure). Similarly, in the Stokes preconditioner matrix, only the diagonal blocks are nonzero, since we use the vector Laplacian as discussed in the introduction. This operator only couples each vector component of the Laplacian with itself, but not with the other vector components. (Application of the constraints resulting from the no-flux boundary conditions will couple vector components at the boundary again, however.)

    When generating the sparsity pattern, we directly apply the constraints from hanging nodes and no-flux boundary conditions. This approach was already used in step-27, but is different from the one in early tutorial programs where we first built the original sparsity pattern and only then added the entries resulting from constraints. The reason for doing so is that later during assembly we are going to distribute the constraints immediately when transferring local to global dofs. Consequently, there will be no data written at positions of constrained degrees of freedom, so we can let the DoFTools::make_sparsity_pattern function omit these entries by setting the last Boolean flag to false. Once the sparsity pattern is ready, we can use it to initialize the Trilinos matrices. Since the Trilinos matrices store the sparsity pattern internally, there is no need to keep the sparsity pattern around after the initialization of the matrix.
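A sketch of the coupling mask and the constrained sparsity pattern for the Stokes system matrix, using the block sizes counted above (illustrative names; the preconditioner and temperature patterns follow the same scheme):

  Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
  for (unsigned int c = 0; c < dim + 1; ++c)
    for (unsigned int d = 0; d < dim + 1; ++d)
      coupling[c][d] = ((c == dim) && (d == dim)) ?
                         DoFTools::none :   // no pressure-pressure entries
                         DoFTools::always;  // everything else couples

  BlockDynamicSparsityPattern dsp(stokes_dofs_per_block, stokes_dofs_per_block);
  DoFTools::make_sparsity_pattern(stokes_dof_handler, coupling, dsp,
                                  stokes_constraints,
                                  false); // omit constrained entries
  stokes_matrix.reinit(dsp);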

      stokes_partitioning.resize(2);
      stokes_partitioning[0] = complete_index_set(n_u);
      temperature_solution = old_temperature_solution;
     

    Next we set up the temperature system and the right hand side using the function assemble_temperature_system(). Knowing the matrix and right hand side of the temperature equation, we set up a preconditioner and a solver. The temperature matrix is a mass matrix (with eigenvalues around one) plus a Laplace matrix (with eigenvalues between zero and $ch^{-2}$) times a small number proportional to the time step $k_n$. Hence, the resulting symmetric and positive definite matrix has eigenvalues in the range $[1,1+k_nh^{-2}]$ (up to constants). This matrix is only moderately ill conditioned even for small mesh sizes and we get a reasonably good preconditioner by simple means, for example with an incomplete Cholesky decomposition preconditioner (IC) as we also use for preconditioning the pressure mass matrix solver. As a solver, we choose the conjugate gradient method CG. As before, we tell the solver to use Trilinos vectors via the template argument TrilinosWrappers::MPI::Vector. Finally, we solve, distribute the hanging node constraints and write out the number of iterations.

  assemble_temperature_system(maximal_velocity);
  {
    SolverControl solver_control(temperature_matrix.m(),
                                 1e-8 * temperature_rhs.l2_norm());
    // ... IC preconditioner + CG solve, then distribute constraints ...
  }
… $\frac{1}{|\mathrm{diam}(\Omega)|^{\alpha-2}}$ instead, where we had set the scaling parameter to one. Since we only computed on the unit square/cube where $\mathrm{diam}(\Omega)=2^{1/d}$, this was entirely equivalent to using the correct formula with $c_R=\left(2^{1/d}\right)^{4-2\alpha}=2^{\frac{4-2\alpha}{d}}$. Since this value for $c_R$ appears to work just fine for the current program, we corrected the formula in the program and set $c_R$ to a value that reproduces exactly the results we had before. We will, however, revisit this issue again in step-32.

/usr/share/doc/packages/dealii/doxygen/deal.II/step_32.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    In addition to these changes, we also use a slightly different preconditioner, and we will have to make a number of changes that have to do with the fact that we want to solve a realistic problem here, not a model problem. The latter, in particular, will require that we think about scaling issues as well as what all those parameters and coefficients in the equations under consideration actually mean. We will discuss first the issues that affect changes in the mathematical formulation and solver structure, then how to parallelize things, and finally the actual testcase we will consider.

    Using the "right" pressure

    In step-31, we used the following Stokes model for the velocity and pressure field:

\begin{eqnarray*}
  -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
  -\rho \; \beta \; T \mathbf{g},
  \\
  \nabla \cdot {\mathbf u} &=& 0.
\end{eqnarray*}

The right hand side of the first equation appears a wee bit unmotivated. Here's how things should really be. We need the external forces that act on the fluid, which we assume are given by gravity only. In the current case, we assume that the fluid does expand slightly for the purposes of this gravity force, but not enough that we need to modify the incompressibility condition (the second equation). What this means is that for the purpose of the right hand side, we can assume that $\rho=\rho(T)$. An assumption that may not be entirely justified is that we can assume that the changes of density as a function of temperature are small, leading to an expression of the form $\rho(T) = \rho_{\text{ref}} [1-\beta(T-T_{\text{ref}})]$, i.e., the density equals $\rho_{\text{ref}}$ at reference temperature and decreases linearly as the temperature increases (as the material expands). The force balance equation then looks properly written like this:

\begin{eqnarray*}
  -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
  \rho_{\text{ref}} [1-\beta(T-T_{\text{ref}})] \mathbf{g}.
\end{eqnarray*}

Now note that the gravity force results from a gravity potential as $\mathbf g=-\nabla \varphi$, so that we can re-write this as follows:

\begin{eqnarray*}
  -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
  -\rho_{\text{ref}} \; \beta\; T\; \mathbf{g}
  -\rho_{\text{ref}} [1+\beta T_{\text{ref}}] \nabla\varphi.
\end{eqnarray*}

The second term on the right is time independent, and so we could introduce a new "dynamic" pressure $p_{\text{dyn}}=p+\rho_{\text{ref}} [1+\beta T_{\text{ref}}] \varphi=p_{\text{total}}-p_{\text{static}}$ with which the Stokes equations would read:

\begin{eqnarray*}
  -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p_{\text{dyn}} &=&
  -\rho_{\text{ref}} \; \beta \; T \; \mathbf{g},
  \\
  \nabla \cdot {\mathbf u} &=& 0.
\end{eqnarray*}

This is exactly the form we used in step-31, and it was appropriate to do so because all changes in the fluid flow are only driven by the dynamic pressure that results from temperature differences. (In other words: Any contribution to the right hand side that results from taking the gradient of a scalar field has no effect on the velocity field.)

    On the other hand, we will here use the form of the Stokes equations that considers the total pressure instead:

\begin{eqnarray*}
  -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
  \rho(T)\; \mathbf{g},
  \\
  \nabla \cdot {\mathbf u} &=& 0.
\end{eqnarray*}

    There are several advantages to this:

    • This way we can plot the pressure in our program in such a way that it actually shows the total pressure that includes the effects of temperature differences as well as the static pressure of the overlying rocks. Since the pressure does not appear any further in any of the other equations, whether to use one or the other is more a matter of taste than of correctness. The flow field is exactly the same, but we get a pressure that we can now compare with values that are given in geophysical books as those that hold at the bottom of the earth mantle, for example.
    • If we wanted to make the model even more realistic, we would have to take into account that many of the material parameters (e.g. the viscosity, the density, etc) not only depend on the temperature but also the total pressure.
• The model above assumed a linear dependence $\rho(T) = \rho_{\text{ref}} [1-\beta(T-T_{\text{ref}})]$ and assumed that $\beta$ is small. In practice, this may not be so. In fact, realistic models are certainly not linear, and $\beta$ may also not be small for at least part of the temperature range because the density's behavior is substantially dependent not only on thermal expansion but also on phase changes.
    • A final reason to do this is discussed in the results section and concerns possible extensions to the model we use here. It has to do with the fact that the temperature equation (see below) we use here does not include a term that contains the pressure. It should, however: rock, like gas, heats up as you compress it. Consequently, material that rises up cools adiabatically, and cold material that sinks down heats adiabatically. We discuss this further below.
Note
There is, however, a downside to this procedure. In the earth, the dynamic pressure is several orders of magnitude smaller than the total pressure. If we use the equations above and solve all variables to, say, 4 digits of accuracy, then we may be able to get the velocity and the total pressure right, but we will have no accuracy at all if we compute the dynamic pressure by subtracting from the total pressure the static part $p_\text{static}=\rho_{\text{ref}} [1+\beta T_{\text{ref}}] \varphi$. If, for example, the dynamic pressure is six orders of magnitude smaller than the static pressure, then we need to solve the overall pressure to at least seven digits of accuracy to get anything remotely accurate. That said, in practice this turns out not to be a limiting factor.

    The scaling of discretized equations

    Remember that we want to solve the following set of equations:

\begin{eqnarray*}
  -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
  \rho(T) \mathbf{g},
  \\
  \nabla \cdot {\mathbf u} &=& 0,
  \\
  \frac{\partial T}{\partial t}
  +
  {\mathbf u} \cdot \nabla T
  -
  \nabla \cdot \kappa \nabla T &=& \gamma,
\end{eqnarray*}

    augmented by appropriate boundary and initial conditions. As discussed in step-31, we will solve this set of equations by solving for a Stokes problem first in each time step, and then moving the temperature equation forward by one time interval.
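In outline, and with illustrative function names rather than the tutorial's verbatim code, each cycle of the time loop therefore looks like this:

  do
    {
      assemble_stokes_system();
      solve_stokes();                // new velocity and pressure
      assemble_temperature_system(); // uses the just-computed velocity
      solve_temperature();           // move T forward by one time interval
      time += time_step;
    }
  while (time <= end_time);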

    The problem under consideration in this current section is with the Stokes problem: if we discretize it as usual, we get a linear system

\begin{eqnarray*}
  M \; X
  =
  \left(\begin{array}{cc}
    A & B^T \\ B & 0
  \end{array}\right)
  \left(\begin{array}{c}
    U \\ P
  \end{array}\right)
  =
  \left(\begin{array}{c}
    F_U \\ F_P
  \end{array}\right)
  =
  F
\end{eqnarray*}

which in this program we will solve with an FGMRES solver. This solver iterates until the residual of these linear equations is below a certain tolerance, i.e., until

\[
  \left\|
  \left(\begin{array}{c}
    F_U - A U^{(k)} - B^T P^{(k)}
    \\
    F_P - B U^{(k)}
  \end{array}\right)
  \right\|
  < \text{Tol}.
\]

This does not make any sense from the viewpoint of physical units: the quantities involved here have physical units so that the first part of the residual has units $\frac{\text{Pa}}{\text{m}} \text{m}^{\text{dim}}$ (most easily established by considering the term $(\nabla \cdot \mathbf v, p)_{\Omega}$ and considering that the pressure has units $\text{Pa}=\frac{\text{kg}}{\text{m}\;\text{s}^2}$ and the integration yields a factor of $\text{m}^{\text{dim}}$), whereas the second part of the residual has units $\frac{\text{m}^{\text{dim}}}{\text{s}}$. Taking the norm of this residual vector would yield a quantity with units $\text{m}^{\text{dim}-1} \sqrt{\left(\text{Pa}\right)^2 + \left(\frac{\text{m}}{\text{s}}\right)^2}$. This, quite obviously, does not make sense, and we should not be surprised that doing so is eventually going to come back to hurt us.

So why is this an issue here, but not in step-31? The reason back there is that everything was nicely balanced: velocities were on the order of one, the pressure likewise, the viscosity was one, and the domain had a diameter of $\sqrt{2}$. As a result, while nonsensical, nothing bad happened. On the other hand, as we will explain below, things here will not be that simply scaled: $\eta$ will be around $10^{21}$, velocities on the order of $10^{-8}$, pressure around $10^8$, and the diameter of the domain is $10^7$. In other words, the order of magnitude for the first equation is going to be $\eta\text{div}\varepsilon(\mathbf u) \approx 10^{21} \frac{10^{-8}}{(10^7)^2} \approx 10^{-1}$, whereas the second equation will be around $\text{div}{\mathbf u}\approx \frac{10^{-8}}{10^7} \approx 10^{-15}$. What this will lead to is this: if the solver wants to make the residual small, it will almost entirely focus on the first set of equations because they are so much bigger, and ignore the divergence equation that describes mass conservation. That's exactly what happens: unless we set the tolerance to extremely small values, the resulting flow field is definitely not divergence free. As an auxiliary problem, it turns out that it is difficult to find a tolerance that always works; in practice, one often ends up with a tolerance that requires 30 or 40 iterations for most time steps, and 10,000 for some others.

So what's a numerical analyst to do in a case like this? The answer is to start at the root and first make sure that everything is mathematically consistent. In our case, this means that if we want to solve the system of Stokes equations jointly, we have to scale them so that they all have the same physical dimensions. In our case, this means multiplying the second equation by something that has units $\frac{\text{Pa}\;\text{s}}{\text{m}}$; one choice is to multiply with $\frac{\eta}{L}$ where $L$ is a typical lengthscale in our domain (which experiments show is best chosen to be the diameter of plumes — around 10 km — rather than the diameter of the domain). Using these numbers for $\eta$ and $L$, this factor is around $10^{17}$. So, we now get this for the Stokes system:

\begin{eqnarray*}
  -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
  \rho(T) \; \mathbf{g},
  \\
  \frac{\eta}{L} \nabla \cdot {\mathbf u} &=& 0.
\end{eqnarray*}

The trouble with this is that the result is not symmetric any more (we have $\frac{\eta}{L} \nabla \cdot$ at the bottom left, but not its transpose operator at the top right). This, however, can be cured by introducing a scaled pressure $\hat p = \frac{L}{\eta}p$, and we get the scaled equations

\begin{eqnarray*}
  -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) +
  \nabla \left(\frac{\eta}{L} \hat p\right) &=&
  \rho(T) \; \mathbf{g},
  \\
  \frac{\eta}{L} \nabla \cdot {\mathbf u} &=& 0.
\end{eqnarray*}

This is now symmetric. Obviously, we can easily recover the original pressure $p$ from the scaled pressure $\hat p$ that we compute as a result of this procedure.

In the program below, we will introduce a factor EquationData::pressure_scaling that corresponds to $\frac{\eta}{L}$, and we will use this factor in the assembly of the system matrix and preconditioner. Because it is annoying and error prone, we will recover the unscaled pressure immediately following the solution of the linear system, i.e., the solution vector's pressure component will immediately be unscaled to retrieve the physical pressure. Since the solver uses the fact that we can use a good initial guess by extrapolating the previous solutions, we also have to scale the pressure immediately before solving.
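A sketch of this scaling round-trip, assuming a block vector whose block(1) stores the pressure (EquationData::pressure_scaling is the $\frac{\eta}{L}$ factor named above; the vector name is illustrative):

  // initial guess: physical pressure -> scaled pressure \hat p = (L/eta) p
  solution.block(1) /= EquationData::pressure_scaling;

  // ... solve the scaled Stokes system ...

  // solution: scaled pressure -> physical pressure p = (eta/L) \hat p
  solution.block(1) *= EquationData::pressure_scaling;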

    Changes to the Stokes preconditioner and solver

In this tutorial program, we apply a variant of the preconditioner used in step-31. That preconditioner was built to operate on the system matrix $M$ in block form such that the product matrix

\begin{eqnarray*}
  P^{-1} M
  =
  \left(\begin{array}{cc}
    A^{-1} & 0 \\ S^{-1} B A^{-1} & -S^{-1}
  \end{array}\right)
  \left(\begin{array}{cc}
    A & B^T \\ B & 0
  \end{array}\right)
\end{eqnarray*}

is of a form that Krylov-based iterative solvers like GMRES can solve in a few iterations. We then replaced the exact inverse of $A$ by the action of an AMG preconditioner $\tilde{A}$ based on a vector Laplace matrix, approximated the Schur complement $S = B A^{-1} B^T$ by a mass matrix $M_p$ on the pressure space and wrote an InverseMatrix class for implementing the action of $M_p^{-1}\approx S^{-1}$ on vectors. In the InverseMatrix class, we used a CG solve with an incomplete Cholesky (IC) preconditioner for performing the inner solves.

An observation one can make is that we use just the action of a preconditioner for approximating the velocity inverse $A^{-1}$ (and the outer GMRES iteration takes care of the approximate character of the inverse), whereas we use a more or less exact inverse for $M_p^{-1}$, realized by a fully converged CG solve. This appears unbalanced, but there's system to this madness: almost all the effort goes into the upper left block to which we apply the AMG preconditioner, whereas even an exact inversion of the pressure mass matrix costs basically nothing. Consequently, if it helps us reduce the overall number of iterations somewhat, then this effort is well spent.

    That said, even though the solver worked well for step-31, we have a problem here that is a bit more complicated (cells are deformed, the pressure varies by orders of magnitude, and we want to plan ahead for more complicated physics), and so we'll change a few things slightly:

    • For more complex problems, it turns out that using just a single AMG V-cycle as preconditioner is not always sufficient. The outer solver converges just fine most of the time in a reasonable number of iterations (say, less than 50) but there are the occasional time step where it suddenly takes 700 or so. What exactly is going on there is hard to determine, but the problem can be avoided by using a more accurate solver for the top left block. Consequently, we'll want to use a CG iteration to invert the top left block of the preconditioner matrix, and use the AMG as a preconditioner for the CG solver.
• The downside of this is that, of course, the Stokes preconditioner becomes much more expensive (approximately 10 times more expensive than when we just use a single V-cycle). Our strategy then is this: let's do up to 30 GMRES iterations with just the V-cycle as a preconditioner and if that doesn't yield convergence, then take the best approximation of the Stokes solution obtained after this first round of iterations and use that as the starting guess for iterations where we use the full inner solver with a rather lenient tolerance as preconditioner. In all our experiments this leads to convergence in only a few additional iterations. (A sketch of this two-stage strategy appears after this list.)
• One thing we need to pay attention to is that when using a CG with a lenient tolerance in the preconditioner, then $y = \tilde A^{-1} r$ is no longer a linear function of $r$ (it is, of course, if we have a very stringent tolerance in our solver, or if we only apply a single V-cycle). This is a problem since now our preconditioner is no longer a linear operator; in other words, every time GMRES uses it the preconditioner looks different. The standard GMRES solver can't deal with this, leading to slow convergence or even breakdown, but the F-GMRES variant is designed to deal with exactly this kind of situation and we consequently use it.
• On the other hand, once we have settled on using F-GMRES we can relax the tolerance used in inverting the preconditioner for $S$. In step-31, we ran a preconditioned CG method on $\tilde S$ until the residual had been reduced by 7 orders of magnitude. Here, we can again be more lenient because we know that the outer preconditioner doesn't suffer.
    • In step-31, we used a left preconditioner in which we first invert the top left block of the preconditioner matrix, then apply the bottom left (divergence) one, and then invert the bottom right. In other words, the application of the preconditioner acts as a lower left block triangular matrix. Another option is to use a right preconditioner that here would be upper right block triangulation, i.e., we first invert the bottom right Schur complement, apply the top right (gradient) operator and then invert the elliptic top left block. To a degree, which one to choose is a matter of taste. That said, there is one significant advantage to a right preconditioner in GMRES-type solvers: the residual with which we determine whether we should stop the iteration is the true residual, not the norm of the preconditioned equations. Consequently, it is much simpler to compare it to the stopping criterion we typically use, namely the norm of the right hand side vector. In writing this code we found that the scaling issues we discussed above also made it difficult to determine suitable stopping criteria for left-preconditioned linear systems, and consequently this program uses a right preconditioner.
• In step-31, we used an IC (incomplete Cholesky) preconditioner for the pressure mass matrix in the Schur complement preconditioner and for the solution of the temperature system. Here, we could in principle do the same, but we choose an even simpler preconditioner, namely a Jacobi preconditioner for both systems. This is because here we target massively parallel computations, where the decompositions for IC/ILU would have to be performed block-wise for the locally owned degrees of freedom on each processor. This means that the preconditioner becomes more like a Jacobi preconditioner anyway, so we rather start from that variant straight away. Note that we only use the Jacobi preconditioners for CG solvers with mass matrices, where they give optimal (h-independent) convergence anyway, even though they usually require about twice as many iterations as an IC preconditioner.
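As referenced above, a sketch of the two-stage solver strategy (the preconditioner objects and the tolerance are illustrative, not the tutorial's verbatim code; deal.II solvers report failure by throwing SolverControl::NoConvergence):

  try
    {
      SolverControl control_cheap(30, tolerance);
      SolverFGMRES<TrilinosWrappers::MPI::BlockVector> solver(control_cheap);
      solver.solve(stokes_matrix, solution, stokes_rhs, preconditioner_cheap);
    }
  catch (const SolverControl::NoConvergence &)
    {
      // Restart from the best approximation obtained so far, now with the
      // expensive preconditioner that performs inner CG solves.
      SolverControl control_expensive(1000, tolerance);
      SolverFGMRES<TrilinosWrappers::MPI::BlockVector> solver(control_expensive);
      solver.solve(stokes_matrix, solution, stokes_rhs, preconditioner_expensive);
    }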
As a final note, let us remark that in step-31 we computed the Schur complement $S=B A^{-1} B^T$ by approximating $-\text{div}(-\eta\Delta)^{-1}\nabla \approx \frac 1{\eta} \mathbf{1}$. Now, however, we have re-scaled the $B$ and $B^T$ operators. So $S$ should now approximate $-\frac{\eta}{L}\text{div}(-\eta\Delta)^{-1}\nabla \frac{\eta}{L} \approx \left(\frac{\eta}{L}\right)^2 \frac 1{\eta} \mathbf{1}$. We use the discrete form of the right hand side of this as our approximation $\tilde S$ to $S$.

    Changes to the artificial viscosity stabilization

Similarly to step-31, we will use an artificial viscosity for stabilization based on a residual of the equation. As a difference to step-31, we will provide two slightly different definitions of the stabilization parameter. For $\alpha=1$, we use the same definition as in step-31:

\begin{eqnarray*}
  \nu_\alpha(T)|_K
  =
  \nu_1(T)|_K
  =
  \beta
  \|\mathbf{u}\|_{L^\infty(K)}
  h_K
  \min\left\{
    1,
    \frac{\|R_1(T)\|_{L^\infty(K)}}{c(\mathbf{u},T)}
  \right\}
\end{eqnarray*}

/usr/share/doc/packages/dealii/doxygen/deal.II/step_33.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    Introduction

    Euler flow

    The equations that describe the movement of a compressible, inviscid gas (the so-called Euler equations of gas dynamics) are a basic system of conservation laws. In spatial dimension $d$ they read

\[
 \partial_t \mathbf{w} + \nabla \cdot \mathbf{F}(\mathbf{w}) =
 \mathbf{G}(\mathbf w),
\]

with the solution $\mathbf{w}=(\rho v_1,\ldots,\rho v_d,\rho, E)^{\top}$ consisting of $\rho$ the fluid density, ${\mathbf v}=(v_1,\ldots v_d)^T$ the flow velocity (and thus $\rho\mathbf v$ being the linear momentum density), and $E$ the energy density of the gas. We interpret the equations above as $\partial_t \mathbf{w}_i + \nabla \cdot \mathbf{F}_i(\mathbf{w}) = \mathbf G_i(\mathbf w)$, $i=1,\ldots,dim+2$.

For the Euler equations, the flux matrix $\mathbf F$ (or system of flux functions) is defined as (shown here for the case $d=3$)

\begin{eqnarray*}
  \mathbf F(\mathbf w)
  =
  \left(
  \begin{array}{ccc}
    \rho v_1^2+p & \rho v_1 v_2 & \rho v_1 v_3 \\
    \rho v_2 v_1 & \rho v_2^2+p & \rho v_2 v_3 \\
    \rho v_3 v_1 & \rho v_3 v_2 & \rho v_3^2+p \\
    \rho v_1 & \rho v_2 & \rho v_3 \\
    (E+p) v_1 & (E+p) v_2 & (E+p) v_3
  \end{array}
  \right),
\end{eqnarray*}

    and we will choose as particular right hand side forcing only the effects of gravity, described by

\begin{eqnarray*}
  \mathbf G(\mathbf w)
  =
  \left(
  \begin{array}{c}
    g_1\rho \\
    g_2\rho \\
    g_3\rho \\
    0 \\
    \rho \mathbf g \cdot \mathbf v
  \end{array}
  \right),
\end{eqnarray*}

where $\mathbf g=(g_1,g_2,g_3)^T$ denotes the gravity vector. With this, the entire system of equations reads:

\begin{eqnarray*}
  \partial_t (\rho v_i) + \sum_{s=1}^d \frac{\partial(\rho v_i v_s +
  \delta_{is} p)}{\partial x_s} &=& g_i \rho, \qquad i=1,\dots,d, \\
  \partial_t \rho + \sum_{s=1}^d \frac{\partial(\rho v_s)}{\partial x_s} &=& 0,  \\
  \partial_t E + \sum_{s=1}^d \frac{\partial((E+p)v_s)}{\partial x_s} &=&
  \rho \mathbf g \cdot \mathbf v.
\end{eqnarray*}

These equations describe, respectively, the conservation of momentum, mass, and energy. The system is closed by a relation that defines the pressure: $p = (\gamma -1)(E-\frac{1}{2} \rho |\mathbf v|^2)$. For the constituents of air (mainly nitrogen and oxygen) and other diatomic gases, the ratio of specific heats is $\gamma=1.4$.

    This problem obviously falls into the class of vector-valued problems. A general overview of how to deal with these problems in deal.II can be found in the Handling vector valued problems topic.

    Discretization

Discretization happens in the usual way, taking into account that this is a hyperbolic problem in the same style as the simple one discussed in step-12: We choose a finite element space $V_h$, and integrate our conservation law against our (vector-valued) test function $\mathbf{z} \in V_h$. We then integrate by parts and approximate the boundary flux with a numerical flux $\mathbf{H}$,

\begin{eqnarray*}
 &&\int_{\Omega} (\partial_t \mathbf{w}, \mathbf{z}) + (\nabla \cdot \mathbf{F}(\mathbf{w}), \mathbf{z}) \\
 &\approx &\int_{\Omega} (\partial_t \mathbf{w}, \mathbf{z}) - (\mathbf{F}(\mathbf{w}), \nabla \mathbf{z}) + h^{\eta}(\nabla \mathbf{w} , \nabla \mathbf{z}) + \int_{\partial \Omega} (\mathbf{H}(\mathbf{w}^+, \mathbf{w}^-, \mathbf{n}), \mathbf{z}^+),
\end{eqnarray*}

where a superscript $+$ denotes the interior trace of a function, and $-$ represents the outer trace. The diffusion term $h^{\eta}(\nabla \mathbf{w} , \nabla \mathbf{z})$ is introduced strictly for stability, where $h$ is the mesh size and $\eta$ is a parameter prescribing how much diffusion to add.

On the boundary, we have to say what the outer trace $\mathbf{w}^-$ is. Depending on the boundary condition, we prescribe either of the following:

• Inflow boundary: $\mathbf{w}^-$ is prescribed to be the desired value.
• Supersonic outflow boundary: $\mathbf{w}^- = \mathbf{w}^+$
• Subsonic outflow boundary: $\mathbf{w}^- = \mathbf{w}^+$ except that the energy variable is modified to support a prescribed pressure $p_o$, i.e. $\mathbf{w}^- =(\rho^+, \rho v_1^+, \dots, \rho v_d^+, p_o/(\gamma -1) + 0.5 \rho |\mathbf{v}^+|^2)$
• Reflective boundary: we set $\mathbf{w}^-$ so that $(\mathbf{v}^+ + \mathbf{v}^-) \cdot \mathbf{n} = 0$ and $\rho^- = \rho^+,E^-=E^+$.

      More information on these issues can be found, for example, in Ralf Hartmann's PhD thesis ("Adaptive Finite Element Methods for the Compressible Euler Equations", PhD thesis, University of Heidelberg, 2002).

We use a time stepping scheme to substitute the time derivative in the above equations. For simplicity, we define $ \mathbf{B}({\mathbf{w}_{n}})(\mathbf z) $ as the spatial residual at time step $n$ :

\begin{eqnarray*}
 \mathbf{B}(\mathbf{w}_{n})(\mathbf z)  &=&
 - \int_{\Omega} \left(\mathbf{F}(\mathbf{w}_n),
 \nabla\mathbf{z}\right) +  h^{\eta}(\nabla \mathbf{w}_n , \nabla \mathbf{z}) \\
 && + \int_{\partial \Omega} \left(\mathbf{H}(\mathbf{w}_n^+,
 \mathbf{w}^-(\mathbf{w}_n^+), \mathbf{n}), \mathbf{z}\right)
 -
 \int_{\Omega} \left(\mathbf{G}(\mathbf{w}_n),
 \mathbf{z}\right) .
\end{eqnarray*}

At each time step, our full discretization is thus that the residual applied to any test function $\mathbf z$ equals zero:

\begin{eqnarray*}
 R(\mathbf{W}_{n+1})(\mathbf z) &=&
 \int_{\Omega} \left(\frac{{\mathbf w}_{n+1} - \mathbf{w}_n}{\delta t},
 \mathbf{z}\right)+
 \theta \mathbf{B}({\mathbf{w}}_{n+1}) +  (1-\theta) \mathbf{B}({\mathbf w}_{n}) \\
 &=& 0
\end{eqnarray*}

where $ \theta \in [0,1] $ and $\mathbf{w}_i = \sum_k \mathbf{W}_i^k \mathbf{\phi}_k$. Choosing $\theta=0$ results in the explicit (forward) Euler scheme, $\theta=1$ in the stable implicit (backward) Euler scheme, and $\theta=\frac 12$ in the Crank-Nicolson scheme.

In the implementation below, we choose the Lax-Friedrichs flux for the function $\mathbf H$, i.e. $\mathbf{H}(\mathbf{a},\mathbf{b},\mathbf{n}) = \frac{1}{2}(\mathbf{F}(\mathbf{a})\cdot \mathbf{n} + \mathbf{F}(\mathbf{b})\cdot \mathbf{n} + \alpha (\mathbf{a} - \mathbf{b}))$, where $\alpha$ is either a fixed number specified in the input file, or where $\alpha$ is a mesh dependent value. In the latter case, it is chosen as $\frac{h}{2\delta T}$ with $h$ the diameter of the face to which the flux is applied, and $\delta T$ the current time step.

With these choices, equating the residual to zero results in a nonlinear system of equations $R(\mathbf{W}_{n+1})=0$. We solve this nonlinear system by a Newton iteration (in the same way as explained in step-15), i.e. by iterating

\begin{eqnarray*}
 R'(\mathbf{W}^k_{n+1},\delta \mathbf{W}_{n+1}^k)(\mathbf z) & = & -
 R(\mathbf{W}^{k}_{n+1})(\mathbf z) \qquad \qquad \forall \mathbf z\in V_h \\
 \mathbf{W}^{k+1}_{n+1} &=& \mathbf{W}^k_{n+1} + \delta \mathbf{W}^k_{n+1},
\end{eqnarray*}

until $|R(\mathbf{W}^k_{n+1})|$ (the residual) is sufficiently small. By testing with the nodal basis of a finite element space instead of all $\mathbf z$, we arrive at a linear system for $\delta \mathbf W$:

\begin{eqnarray*}
 \mathbf R'(\mathbf{W}^k_{n+1})\delta \mathbf{W}^k_{n+1} & = & -
 \mathbf R(\mathbf{W}^{k}_{n+1}).
\end{eqnarray*}

This linear system is, in general, neither symmetric nor does it have any particular definiteness properties. We will either use a direct solver or Trilinos' GMRES implementation to solve it. As will become apparent from the results shown below, this fully implicit iteration converges very rapidly (typically in 3 steps) and with the quadratic convergence order expected from a Newton method.
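A sketch of this Newton loop (the helper functions and names are illustrative, not the tutorial's verbatim code):

  current_solution = old_solution; // starting guess W^0_{n+1}
  for (unsigned int k = 0; k < max_newton_iterations; ++k)
    {
      assemble_system();                 // builds R(W^k) and R'(W^k)
      if (system_rhs.l2_norm() < newton_tolerance)
        break;                           // residual is sufficiently small
      solve(newton_update);              // R'(W^k) dW = -R(W^k)
      current_solution += newton_update; // W^{k+1} = W^k + dW
    }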

      Automatic differentiation

      Since computing the Jacobian matrix $\mathbf R'(\mathbf W^k)$ is a terrible beast, we use an automatic differentiation package, Sacado, to do this. Sacado is a package within the Trilinos framework and offers a C++ template class Sacado::Fad::DFad (Fad standing for "forward automatic differentiation") that supports basic arithmetic operators and functions such as sqrt, sin, cos, pow, etc. In order to use this feature, one declares a collection of variables of this type and then denotes some of this collection as degrees of freedom, the rest of the variables being functions of the independent variables. These variables are used in an algorithm, and as the variables are used, their sensitivities with respect to the degrees of freedom are continuously updated.

One can imagine that for the full Jacobian matrix as a whole, this could be prohibitively expensive: the number of independent variables are the $\mathbf W^k$, the dependent variables the elements of the vector $\mathbf R(\mathbf W^k)$. Both of these vectors can easily have tens of thousands of elements or more. However, it is important to note that not all elements of $\mathbf R$ depend on all elements of $\mathbf W^k$: in fact, an entry in $\mathbf R$ only depends on an element of $\mathbf W^k$ if the two corresponding shape functions overlap and couple in the weak form.

Specifically, it is wise to define a minimum set of independent AD variables that the residual on the current cell may possibly depend on: on every element, we define those variables as independent that correspond to the degrees of freedom defined on this cell (or, if we have to compute jump terms between cells, that correspond to degrees of freedom defined on either of the two adjacent cells), and the dependent variables are the elements of the local residual vector. Not doing this, i.e. defining all elements of $\mathbf W^k$ as independent, will result in a very expensive computation of a lot of zeros: the elements of the local residual vector are independent of almost all elements of the solution vector, and consequently their derivatives are zero; however, trying to compute these zeros can easily take 90% or more of the compute time of the entire program, as shown in an experiment inadvertently made by a student a few years after this program was first written.

Coming back to the question of computing the Jacobian automatically: The author has used this approach side by side with a hand coded Jacobian for the incompressible Navier-Stokes problem and found the Sacado approach to be just as fast as using a hand coded Jacobian, but infinitely simpler and less error prone: Since using the auto-differentiation requires only that one code the residual $R(\mathbf{W})$, ensuring code correctness and maintaining code becomes tremendously simpler – the Jacobian matrix $\mathbf R'$ is computed by essentially the same code that also computes the residual $\mathbf R$.

      All this said, here's a very simple example showing how Sacado can be used:

  #include <Sacado.hpp>
  #include <iostream>
  ...
  std::cout << "dc/da = " << derivs[0] << ", dc/db=" << derivs[1] << std::endl;
  }

The output consists of the derivatives $\frac{\partial c(a,b)}{\partial a}, \frac{\partial c(a,b)}{\partial b}$ of $c(a,b)=2a+\cos(ab)$ at $a=1,b=2$.

    It should be noted that Sacado provides more auto-differentiation capabilities than the small subset used in this program. However, understanding the example above is enough to understand the use of Sacado in this Euler flow program.

    Trilinos solvers

The program uses either the Aztec iterative solvers or the Amesos sparse direct solver, both provided by the Trilinos package. This package is inherently designed to be used in a parallel program; however, it may be used in serial just as easily, as is done here. The Epetra package is the basic vector/matrix library upon which the solvers are built. This very powerful package can be used to describe the parallel distribution of a vector, and to define sparse matrices that operate on these vectors. Please view the commented code for more details on how these solvers are used within the example.
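For orientation, a minimal sketch (not taken from the program; A, x, and b are assumed to be an already assembled Epetra_CrsMatrix and two Epetra_Vector objects) of how a linear system might be handed to the Aztec GMRES solver in serial:

#include <AztecOO.h>
#include <Epetra_LinearProblem.h>

// Solve A x = b with GMRES and a domain-decomposition preconditioner.
Epetra_LinearProblem problem(&A, &x, &b);
AztecOO solver(problem);
solver.SetAztecOption(AZ_solver, AZ_gmres);
solver.SetAztecOption(AZ_precond, AZ_dom_decomp);
solver.Iterate(1000, 1e-8); // at most 1000 iterations, tolerance 1e-8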


    Implementation

    The implementation of this program is split into three essential parts:

• The EulerEquations class that encapsulates everything that completely describes the specifics of the Euler equations. This includes the flux matrix $\mathbf F(\mathbf W)$, the numerical flux $\mathbf F(\mathbf W^+,\mathbf W^-,\mathbf n)$, the right hand side $\mathbf G(\mathbf W)$, boundary conditions, refinement indicators, postprocessing the output, and similar things that require knowledge of the meaning of the individual components of the solution vectors and the equations.

      Transformations between variables

Next, we define the gas constant. We will set it to 1.4 in its definition immediately following the declaration of this class (unlike integer variables, like the ones above, static const floating point member variables cannot be initialized within the class declaration in C++). This value of 1.4 is representative of a gas that consists of molecules composed of two atoms, such as air, which up to small traces consists almost entirely of $N_2$ and $O_2$.

        static const double gas_gamma;
       
       
In the following, we will need to compute the kinetic energy and the pressure from a vector of conserved variables. This we can do based on the energy density and the kinetic energy $\frac 12 \rho |\mathbf v|^2 = \frac{|\rho \mathbf v|^2}{2\rho}$ (note that the independent variables contain the momentum components $\rho v_i$, not the velocities $v_i$).

        template <typename InputVector>
        static typename InputVector::value_type
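The body of this function is cut off by the diff; a sketch of what such a kinetic-energy helper might look like, assuming members first_momentum_component, density_component, and dim from the surrounding class (a reconstruction, not the verbatim code):

template <typename InputVector>
static typename InputVector::value_type
compute_kinetic_energy(const InputVector &W)
{
  // |rho v|^2 / (2 rho) = 1/2 rho |v|^2, using the momenta stored in W:
  typename InputVector::value_type kinetic_energy = 0;
  for (unsigned int d = 0; d < dim; ++d)
    kinetic_energy +=
      W[first_momentum_component + d] * W[first_momentum_component + d];
  kinetic_energy *= 1. / (2 * W[density_component]);
  return kinetic_energy;
}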
/usr/share/doc/packages/dealii/doxygen/deal.II/step_34.html differs (HTML document, UTF-8 Unicode text, with very long lines)

      Irrotational flow

      The incompressible motion of an inviscid fluid past a body (for example air past an airplane wing, or air or water past a propeller) is usually modeled by the Euler equations of fluid dynamics:

\begin{align*}
  \frac{\partial }{\partial t}\mathbf{v} + (\mathbf{v}\cdot\nabla)\mathbf{v}
  &=
  -\frac{1}{\rho}\nabla p + \mathbf{g}
  &\text{in } \mathbb{R}^n\backslash\Omega,
  \\
  \nabla \cdot \mathbf{v}&=0
  &\text{in } \mathbb{R}^n\backslash\Omega
\end{align*}

where the fluid density $\rho$ and the acceleration $\mathbf{g}$ due to external forces are given and the velocity $\mathbf{v}$ and the pressure $p$ are the unknowns. Here $\Omega$ is a closed bounded region representing the body around which the fluid moves.

The above equations can be derived from the Navier-Stokes equations assuming that the effects due to viscosity are negligible compared to those due to the pressure gradient, inertial forces and the external forces. This is the opposite case of the Stokes equations discussed in step-22, which are the limit case of dominant viscosity, i.e. where the velocity is so small that inertia forces can be neglected. On the other hand, owing to the assumed incompressibility, the equations are not suited for very high speed gas flows where compressibility and the equation of state of the gas have to be taken into account, leading to the Euler equations of gas dynamics, a hyperbolic system.

      For the purpose of this tutorial program, we will consider only stationary flow without external forces:

\begin{align*}
  (\mathbf{v}\cdot\nabla)\mathbf{v}
  &=
  -\frac{1}{\rho}\nabla p
  &\text{in } \mathbb{R}^n\backslash\Omega,
  \\
  \nabla \cdot \mathbf{v}&=0
  &\text{in } \mathbb{R}^n\backslash\Omega
\end{align*}

      Uniqueness of the solution of the Euler equations is ensured by adding the boundary conditions

\[
  \label{eq:boundary-conditions}
  \begin{aligned}
    \mathbf{n}\cdot\mathbf{v}& = 0 \qquad && \text{ on } \partial\Omega \\
    \mathbf{v}& = \mathbf{v}_\infty && \text{ when } |\mathbf{x}| \to \infty,
  \end{aligned}
\]

which is to say that the body is at rest in our coordinate system and is not permeable, and that the fluid has (constant) velocity $\mathbf{v}_\infty$ at infinity. An alternative viewpoint is that our coordinate system moves along with the body whereas the background fluid is at rest at infinity. Notice that we define the normal $\mathbf{n}$ as the outer normal to the domain $\Omega$, which is the opposite of the outer normal to the integration domain.

For both stationary and non-stationary flow, the solution process starts by solving for the velocity in the second equation and substituting in the first equation in order to find the pressure. The solution of the stationary Euler equations is typically performed in order to understand the behavior of the given (possibly complex) geometry when a prescribed motion is enforced on the system.

The first step in this process is to change the frame of reference from a coordinate system moving along with the body to one in which the body moves through a fluid that is at rest at infinity. This can be expressed by introducing a new velocity $\mathbf{\tilde{v}}=\mathbf{v}-\mathbf{v}_\infty$ for which we find that the same equations hold (because $\nabla\cdot \mathbf{v}_\infty=0$) and we have boundary conditions

\[
  \label{eq:boundary-conditions-tilde}
  \begin{aligned}
    \mathbf{n}\cdot\mathbf{\tilde{v}}& = -\mathbf{n}\cdot\mathbf{v}_\infty \qquad && \text{ on } \partial\Omega \\
    \mathbf{\tilde{v}}& = 0 && \text{ when } |\mathbf{x}| \to \infty.
  \end{aligned}
\]

If we assume that the fluid is irrotational, i.e., $\nabla \times \mathbf{v}=0$ in $\mathbb{R}^n\backslash\Omega$, we can represent the velocity, and consequently also the perturbation velocity, as the gradient of a scalar function:

\[
  \mathbf{\tilde{v}}=\nabla\phi,
\]

and so the second part of the Euler equations above can be rewritten as the homogeneous Laplace equation for the unknown $\phi$:

\begin{align*}
  \label{laplace}
  \Delta\phi &= 0 \qquad &&\text{in}\ \mathbb{R}^n\backslash\Omega,
  \\
  \mathbf{n}\cdot\nabla\phi &= -\mathbf{n}\cdot\mathbf{v}_\infty
  && \text{on}\ \partial\Omega
\end{align*}

while the momentum equation reduces to Bernoulli's equation that expresses the pressure $p$ as a function of the potential $\phi$:

\[
  \frac{p}{\rho} + \frac{1}{2} |\nabla \phi|^2 = 0 \qquad \text{in } \Omega.
\]

      So we can solve the problem by solving the Laplace equation for the potential. We recall that the following functions, called fundamental solutions of the Laplace equation,

\[
  \label{eq:3}
  \begin{aligned}
    G(\mathbf{y}-\mathbf{x}) = & -\frac{1}{2\pi}\ln|\mathbf{y}-\mathbf{x}| \qquad && \text{for } n=2 \\
    G(\mathbf{y}-\mathbf{x}) = & \frac{1}{4\pi}\frac{1}{|\mathbf{y}-\mathbf{x}|} && \text{for } n=3,
  \end{aligned}
\]

      satisfy in a distributional sense the equation:

\[
  -\Delta_y G(\mathbf{y}-\mathbf{x}) = \delta(\mathbf{y}-\mathbf{x}),
\]

where the derivative is done in the variable $\mathbf{y}$. By using the usual Green identities, our problem can be written on the boundary $\partial\Omega = \Gamma$ only. We recall the general definition of the second Green identity:

\[
  \label{green}
  \int_{\omega}
  (-\Delta u)v\,dx + \int_{\partial\omega} \frac{\partial u}{\partial \tilde{\mathbf{n}}}v \,ds
  =
  \int_{\omega}
  (-\Delta v)u\,dx + \int_{\partial\omega} u\frac{\partial v}{\partial \tilde{\mathbf{n}}} \,ds,
\]

where $\tilde{\mathbf{n}}$ is the normal to the surface of $\omega$ pointing outwards from the domain of integration $\omega$.

In our case the domain of integration is the domain $\mathbb{R}^n\backslash\Omega$, whose boundary is $\Gamma_\infty \cup \Gamma$, where the "boundary" at infinity is defined as

\[
  \Gamma_\infty \dealcoloneq \lim_{r\to\infty} \partial B_r(0).
\]

In our program the normals are defined as outer to the domain $\Omega$, that is, they are in fact inner to the integration domain, and some care is required in defining the various integrals with the correct signs for the normals, i.e. replacing $\tilde{\mathbf{n}}$ by $-\mathbf{n}$.

If we substitute $u$ and $v$ in the Green identity with the solution $\phi$ and with the fundamental solution of the Laplace equation respectively, as long as $\mathbf{x}$ is chosen in the region $\mathbb{R}^n\backslash\Omega$, we obtain:

\[
  \phi(\mathbf{x}) -
  \int_{\Gamma\cup\Gamma_\infty}\frac{\partial G(\mathbf{y}-\mathbf{x})}{\partial \mathbf{n}_y}\phi(\mathbf{y})\,ds_y
  =
  -\int_{\Gamma\cup\Gamma_\infty}G(\mathbf{y}-\mathbf{x})\frac{\partial \phi}{\partial \mathbf{n}_y}(\mathbf{y})\,ds_y
  \qquad \forall\mathbf{x}\in \mathbb{R}^n\backslash\Omega
\]

where the normals now point into the domain of integration.

Notice that in the above equation, we also have the integrals on the portion of the boundary at $\Gamma_\infty$. Using the boundary conditions of our problem, we have that $\nabla \phi$ is zero at infinity (which simplifies the integral on $\Gamma_\infty$ on the right hand side).

The integral on $\Gamma_\infty$ that appears on the left hand side can be treated by observing that $\nabla\phi=0$ implies that $\phi$ at infinity is necessarily constant. We define its value to be $\phi_\infty$. It is an easy exercise to prove that

\[
  -\int_{\Gamma_\infty} \frac{\partial G(\mathbf{y}-\mathbf{x})}
  {\partial \mathbf{n}_y}\phi_\infty \,ds_y =
  \lim_{r\to\infty} \int_{\partial B_r(0)} \frac{\mathbf{r}}{r} \cdot \nabla G(\mathbf{y}-\mathbf{x})
  \phi_\infty \,ds_y = -\phi_\infty.
\]

Using this result, we can reduce the above equation to one posed on the boundary $\Gamma$ only, using the so-called Single and Double Layer Potential operators:

\[
  \label{integral}
  \phi(\mathbf{x}) - (D\phi)(\mathbf{x}) = \phi_\infty
  -\left(S \frac{\partial \phi}{\partial n_y}\right)(\mathbf{x})
  \qquad \forall\mathbf{x}\in \mathbb{R}^n\backslash\Omega.
\]
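Spelled out, and consistent with the boundary integrals above (stated here only for the reader's convenience), the two operators are

\[
  (S\psi)(\mathbf{x}) = \int_\Gamma G(\mathbf{y}-\mathbf{x})\,\psi(\mathbf{y})\,ds_y,
  \qquad
  (D\phi)(\mathbf{x}) = \int_\Gamma \frac{\partial G(\mathbf{y}-\mathbf{x})}{\partial \mathbf{n}_y}\,\phi(\mathbf{y})\,ds_y.
\]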

(The name of these operators comes from the fact that they describe the electric potential in $\mathbb{R}^n$ due to a single thin sheet of charges along a surface, and due to a double sheet of charges and anti-charges along the surface, respectively.)

In our case, we know the Neumann values of $\phi$ on the boundary: $\mathbf{n}\cdot\nabla\phi = -\mathbf{n}\cdot\mathbf{v}_\infty$. Consequently,

\[
  \phi(\mathbf{x}) - (D\phi)(\mathbf{x}) = \phi_\infty +
  \left(S[\mathbf{n}\cdot\mathbf{v}_\infty]\right)(\mathbf{x})
  \qquad \forall\mathbf{x} \in \mathbb{R}^n\backslash\Omega.
\]

If we take the limit for $\mathbf{x}$ tending to $\Gamma$ of the above equation, using well known properties of the single and double layer operators, we obtain an equation for $\phi$ just on the boundary $\Gamma$ of $\Omega$:

\[
  \label{SD}
  \alpha(\mathbf{x})\phi(\mathbf{x}) - (D\phi)(\mathbf{x}) = \phi_\infty +
  \left(S [\mathbf{n}\cdot\mathbf{v}_\infty]\right)(\mathbf{x})
  \quad \mathbf{x}\in \partial\Omega,
\]


      which is the Boundary Integral Equation (BIE) we were looking for, where the quantity $\alpha(\mathbf{x})$ is the fraction of angle or solid angle by which the point $\mathbf{x}$ sees the domain of integration $\mathbb{R}^n\backslash\Omega$.
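A standard special case worth keeping in mind: where $\Gamma$ is smooth, the point $\mathbf{x}$ sees the integration domain under exactly half of the full angle, so

\[
  \alpha(\mathbf{x}) = \frac{1}{2} \qquad \text{for } \mathbf{x} \text{ on a smooth part of } \Gamma,
\]

while at corners and edges $\alpha$ takes other values between $0$ and $1$.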

/usr/share/doc/packages/dealii/doxygen/deal.II/step_35.html differs (HTML document, UTF-8 Unicode text, with very long lines)

\begin{align*}
  \cdots \\
  \nabla \cdot u = 0,
\end{align*}

where $u$ represents the velocity of the flow and $p$ the pressure. This system of equations is supplemented by the initial condition

      \[
   u |_{t=0} = u_0,
 \]

with $u_0$ sufficiently smooth and solenoidal, and suitable boundary conditions. For instance, an admissible boundary condition is

\[
  u|_{\partial\Omega} = u_b.
\]

\[
  u\times n|_{\Gamma_2} = 0, \quad p|_{\Gamma_2} = 0
\]

where $n$ is the outer unit normal. The boundary conditions on $\Gamma_2$ are often used to model outflow conditions.

      In previous tutorial programs (see for instance step-20 and step-22) we have seen how to solve the time-independent Stokes equations using a Schur complement approach. For the time-dependent case, after time discretization, we would arrive at a system like

      \begin{align*}
   \frac1\tau u^k - \nu \Delta u^k + \nabla p^k = F^k, \\
   \nabla \cdot u^k = 0,
 \end{align*}

where $\tau$ is the time-step. Although the structure of this system is similar to the Stokes system and thus it could be solved using a Schur complement approach, it turns out that the condition number of the Schur complement is proportional to $\tau^{-2}$. This makes the system very difficult to solve, and means that for the Navier-Stokes equations, this is not a useful avenue to the solution.

      Projection methods

      Rather, we need to come up with a different approach to solve the time-dependent Navier-Stokes equations. The difficulty in their solution comes from the fact that the velocity and the pressure are coupled through the constraint

\[
  \nabla \cdot u = 0.
\]

with $v^{k+1}\in H$. Taking the divergence of this equation we arrive at the projection equation.

• The more accurate of the two variants outlined above is the rotational one. However, the program below implements both variants. Moreover, in the author's experience, the standard form is the one that should be used if, for instance, the viscosity $\nu$ is variable.

    The standard incremental scheme and the rotational incremental scheme were first considered by van Kan in

/usr/share/doc/packages/dealii/doxygen/deal.II/step_36.html differs (HTML document, UTF-8 Unicode text, with very long lines)

\begin{align*}
  \cdots &&\text{in}\ \Omega\quad, \\
  \Psi &= 0 &&\text{on}\ \partial\Omega\quad.
\end{align*}

As a consequence, this particle can only exist in a certain number of eigenstates that correspond to the energy eigenvalues $\varepsilon_\ell$ admitted as solutions of this equation. The orthodox (Copenhagen) interpretation of quantum mechanics posits that, if a particle has energy $\varepsilon_\ell$ then the probability of finding it at location $\mathbf x$ is proportional to $|\Psi_\ell(\mathbf x)|^2$ where $\Psi_\ell$ is the eigenfunction that corresponds to this eigenvalue.

In order to numerically find solutions to this equation, i.e. a set of pairs of eigenvalues/eigenfunctions, we use the usual finite element approach of multiplying the equation from the left with test functions, integrating by parts, and searching for solutions in finite dimensional spaces by approximating $\Psi(\mathbf x)\approx\Psi_h(\mathbf x)=\sum_{j}\phi_j(\mathbf x)\tilde\psi_j$, where $\tilde\psi$ is a vector of expansion coefficients. We then immediately arrive at the following equation that discretizes the continuous eigenvalue problem:

\[
  A \tilde{\Psi} = \varepsilon_h M \tilde{\Psi} \quad,
\]

where $A$ is the stiffness matrix arising from the differential operator $L$, and $M$ is the mass matrix. The solution to the eigenvalue problem is an eigenspectrum $\varepsilon_{h,\ell}$, with associated eigenfunctions $\Psi_\ell=\sum_j \phi_j\tilde{\psi}_j$.
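For concreteness, if the operator had the Schrödinger-like form $L=-\Delta+V$ (the precise scaling used by the program may differ), the entries of these matrices would read

\[
  A_{ij} = \int_\Omega \nabla\phi_i \cdot \nabla\phi_j \,dx
         + \int_\Omega V\,\phi_i\,\phi_j \,dx,
  \qquad
  M_{ij} = \int_\Omega \phi_i\,\phi_j \,dx.
\]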

      Eigenvalues and Dirichlet boundary conditions

In this program, we use Dirichlet boundary conditions for the wave function $\Psi$. What this means, from the perspective of a finite element code, is that only the interior degrees of freedom are real degrees of freedom: the ones on the boundary are not free but are forced to have a zero value, after all. On the other hand, the finite element method gains much of its power and simplicity from the fact that we just do the same thing on every cell, without having to think too much about where a cell is, whether it bounds on a less refined cell and consequently has a hanging node, or is adjacent to the boundary. All such checks would make the assembly of finite element linear systems unbearably difficult to write and even more so to read.

      Consequently, of course, when you distribute degrees of freedom with your DoFHandler object, you don't care whether some of the degrees of freedom you enumerate are at a Dirichlet boundary. They all get numbers. We just have to take care of these degrees of freedom at a later time when we apply boundary values. There are two basic ways of doing this (either using MatrixTools::apply_boundary_values() after assembling the linear system, or using AffineConstraints::distribute_local_to_global() during assembly; see the constraints topic for more information), but both result in the same: a linear system that has a total number of rows equal to the number of all degrees of freedom, including those that lie on the boundary. However, degrees of freedom that are constrained by Dirichlet conditions are separated from the rest of the linear system by zeroing out the corresponding row and column, putting a single positive entry on the diagonal, and the corresponding Dirichlet value on the right hand side.
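As a minimal sketch of the first of these two routes (illustrative names; a dof_handler, an assembled system_matrix/system_rhs, and a solution vector are assumed to exist):

std::map<types::global_dof_index, double> boundary_values;
VectorTools::interpolate_boundary_values(dof_handler,
                                         0, // boundary id
                                         Functions::ZeroFunction<dim>(),
                                         boundary_values);
// Zero out rows and columns of constrained dofs, put a positive entry on
// the diagonal, and set the corresponding right hand side values:
MatrixTools::apply_boundary_values(boundary_values,
                                   system_matrix,
                                   solution,
                                   system_rhs);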

      If you assume for a moment that we had renumbered degrees of freedom in such a way that all of those on the Dirichlet boundary come last, then the linear system we would get when solving a regular PDE with a right hand side would look like this:

\begin{align*}
  \begin{pmatrix}
    A_i & 0 \\ 0 & D_b
  \end{pmatrix}
  \begin{pmatrix}
    U_i \\ U_b
  \end{pmatrix}
  =
  \begin{pmatrix}
    F_i \\ F_b
  \end{pmatrix}.
\end{align*}

Here, subscripts $i$ and $b$ correspond to interior and boundary degrees of freedom, respectively. The interior degrees of freedom satisfy the linear system $A_i U_i=F_i$ which yields the correct solution in the interior, and boundary values are determined by $U_b = D_b^{-1} F_b$ where $D_b$ is a diagonal matrix that results from the process of eliminating boundary degrees of freedom, and $F_b$ is chosen in such a way that $U_{b,j}=D_{b,jj}^{-1} F_{b,j}$ has the correct boundary values for every boundary degree of freedom $j$. (For the curious, the entries of the matrix $D_b$ result from adding modified local contributions to the global matrix where for the local matrices the diagonal elements, if non-zero, are set to their absolute value; otherwise, they are set to the average of absolute values of the diagonal. This process guarantees that the entries of $D_b$ are positive and of a size comparable to the rest of the diagonal entries, ensuring that the resulting matrix does not incur unreasonable losses of accuracy due to roundoff involving matrix entries of drastically different size. The actual values that end up on the diagonal are difficult to predict and you should treat them as arbitrary and unpredictable, but positive.)

For "regular" linear systems, this all leads to the correct solution. On the other hand, for eigenvalue problems, this is not so trivial. There, eliminating boundary values affects both matrices $A$ and $M$ that we will solve with in the current tutorial program. After elimination of boundary values, we then receive an eigenvalue problem that can be partitioned like this:

\begin{align*}
  \begin{pmatrix}
    A_i & 0 \\ 0 & D_A
  \end{pmatrix}
  \cdots
\end{align*}

  eigenfunctions,
  eigenfunctions.size());
The output of the call above is a set of vectors and values. In eigenvalue problems, the eigenfunctions are only determined up to a constant that can be fixed pretty arbitrarily. Knowing nothing about the origin of the eigenvalue problem, SLEPc has no other choice than to normalize the eigenvectors to one in the $l_2$ (vector) norm. Unfortunately this norm has little to do with any norm we may be interested in from an eigenfunction perspective: the $L_2(\Omega)$ norm, or maybe the $L_\infty(\Omega)$ norm.

Let us choose the latter and rescale eigenfunctions so that they have $\|\phi_i(\mathbf x)\|_{L^\infty(\Omega)}=1$ instead of $\|\Phi\|_{l_2}=1$ (where $\phi_i$ is the $i$th eigenfunction and $\Phi_i$ the corresponding vector of nodal values). For the $Q_1$ elements chosen here, we know that the maximum of the function $\phi_i(\mathbf x)$ is attained at one of the nodes, so $\max_{\mathbf x}\phi_i(\mathbf x)=\max_j (\Phi_i)_j$, making the normalization in the $L_\infty$ norm trivial. Note that this doesn't work as easily if we had chosen $Q_k$ elements with $k>1$: there, the maximum of a function does not necessarily have to be attained at a node, and so $\max_{\mathbf x}\phi_i(\mathbf x)\ge\max_j (\Phi_i)_j$ (although the equality is usually nearly true).

        for (auto &eigenfunction : eigenfunctions)
        eigenfunction /= eigenfunction.linfty_norm();
       
      set Global mesh refinement steps = 5
      set Number of eigenvalues/eigenfunctions = 5
      set Potential = 0
Here, the potential is zero inside the domain, and we know that the eigenvalues are given by $\lambda_{(mn)}=\frac{\pi^2}{4}(m^2+n^2)$ where $m,n\in{\mathbb N^+}$. Eigenfunctions are sines and cosines with $m$ and $n$ periods in $x$ and $y$ directions. This matches the output our program generates:

examples/step-36> make run
      ============================ Running step-36
      Number of active cells: 1024
      Number of degrees of freedom: 1089
• In our derivation of the problem we have assumed that the particle is confined to a domain $\Omega$ and that at the boundary of this domain its probability $|\Psi|^2$ of being there is zero. This is equivalent to solving the eigenvalue problem on all of ${\mathbb R}^d$ and assuming that the energy potential is finite only inside a region $\Omega$ and infinite outside. It is relatively easy to show that $|\Psi(\mathbf x)|^2=0$ at all locations $\mathbf x$ where $V(\mathbf x)=\infty$. So the question is what happens if our potential is not of this form, i.e. there is no bounded domain outside of which the potential is infinite? In that case, it may be worthwhile to just consider a very large domain at the boundary of which $V(\mathbf x)$ is at least very large, if not infinite. Play around with a few cases like this and explore how the spectrum and eigenfunctions change as we make the computational region larger and larger.


• The plots above show the wave function $\Psi(\mathbf x)$, but the physical quantity of interest is actually the probability density $|\Psi(\mathbf x)|^2$ for the particle to be at location $\mathbf x$. Some visualization programs can compute derived quantities from the data in an input file, but we can also do so right away when creating the output file. The facility to do that is the DataPostprocessor class that can be used in conjunction with the DataOut class. Examples of how this can be done can be found in step-29 and step-33.

/usr/share/doc/packages/dealii/doxygen/deal.II/step_37.html differs (HTML document, UTF-8 Unicode text, with very long lines)

      The major motivation for matrix-free methods is the fact that on today's processors access to main memory (i.e., for objects that do not fit in the caches) has become the bottleneck in many solvers for partial differential equations: To perform a matrix-vector product based on matrices, modern CPUs spend far more time waiting for data to arrive from memory than on actually doing the floating point multiplications and additions. Thus, if we could substitute looking up matrix elements in memory by re-computing them — or rather, the operator represented by these entries —, we may win in terms of overall run-time even if this requires a significant number of additional floating point operations. That said, to realize this with a trivial implementation is not enough and one needs to really look at the details to gain in performance. This tutorial program and the papers referenced above show how one can implement such a scheme and demonstrates the speedup that can be obtained.

      The test case

      In this example, we consider the Poisson problem

\begin{eqnarray*}
  -\nabla \cdot a(\mathbf x) \nabla u &=& 1, \\
  u &=& 0 \quad \text{on}\ \partial \Omega
\end{eqnarray*}

where $a(\mathbf x)$ is a variable coefficient. Below, we explain how to implement a matrix-vector product for this problem without explicitly forming the matrix. The construction can, of course, be done in a similar way for other equations as well.

We choose as domain $\Omega=[0,1]^3$ and $a(\mathbf x)=\frac{1}{0.05 + 2\|\mathbf x\|^2}$. Since the coefficient is symmetric around the origin but the domain is not, we will end up with a non-symmetric solution.

      Matrix-vector product implementation

      In order to find out how we can write a code that performs a matrix-vector product, but does not need to store the matrix elements, let us start at looking how a finite element matrix A is assembled:

\begin{eqnarray*}
  A = \sum_{\mathrm{cell}=1}^{\mathrm{n\_cells}}
  P_{\mathrm{cell,{loc-glob}}}^T A_{\mathrm{cell}} P_{\mathrm{cell,{loc-glob}}}.
\end{eqnarray*}

In this formula, the matrix $P_{\mathrm{cell,{loc-glob}}}$ is a rectangular matrix that defines the index mapping from local degrees of freedom in the current cell to the global degrees of freedom. The information from which this operator can be built is usually encoded in the local_dof_indices variable and is used in the assembly calls filling matrices in deal.II. Here, $A_{\mathrm{cell}}$ denotes the cell matrix associated with $A$.

      If we are to perform a matrix-vector product, we can hence use that

\begin{eqnarray*}
  y &=& A\cdot u = \left(\sum_{\text{cell}=1}^{\mathrm{n\_cells}} P_\mathrm{cell,{loc-glob}}^T
  A_\mathrm{cell} P_\mathrm{cell,{loc-glob}}\right) \cdot u
  \\
  &=& \sum_{\mathrm{cell}=1}^{\mathrm{n\_cells}} P_\mathrm{cell,{loc-glob}}^T
  A_\mathrm{cell} u_\mathrm{cell}
  \\
  &=& \sum_{\mathrm{cell}=1}^{\mathrm{n\_cells}} P_\mathrm{cell,{loc-glob}}^T
  v_\mathrm{cell},
\end{eqnarray*}

where $u_\mathrm{cell}$ are the values of $u$ at the degrees of freedom of the respective cell, and $v_\mathrm{cell}=A_\mathrm{cell} u_\mathrm{cell}$ correspondingly for the result. A naive attempt to implement the local action of the Laplacian would hence be to use the following code:

MatrixFree<dim>::vmult (Vector<double> &dst,
                        const Vector<double> &src) const

Here we neglected boundary conditions as well as any hanging nodes we may have, though neither would be very difficult to include using the AffineConstraints class. Note how we first generate the local matrix in the usual way as a sum over all quadrature points for each local matrix entry. To form the actual product as expressed in the above formula, we extract the values of src of the cell-related degrees of freedom (the action of $P_\mathrm{cell,{loc-glob}}$), multiply by the local matrix (the action of $A_\mathrm{cell}$), and finally add the result to the destination vector dst (the action of $P_\mathrm{cell,{loc-glob}}^T$, added over all the elements). It is not more difficult than that, in principle.
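Since the body of that code is elided in this diff, here is a compressed sketch of the structure it describes (illustrative, not the verbatim listing):

for (const auto &cell : dof_handler.active_cell_iterators())
  {
    fe_values.reinit(cell);
    cell->get_dof_indices(local_dof_indices);
    // ... fill cell_matrix(i,j) from shape gradients, JxW and coefficient ...
    for (unsigned int i = 0; i < dofs_per_cell; ++i)
      for (unsigned int j = 0; j < dofs_per_cell; ++j)
        dst(local_dof_indices[i]) +=
          cell_matrix(i, j) * src(local_dof_indices[j]); // P^T A_cell P u
  }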

      While this code is completely correct, it is very slow. For every cell, we generate a local matrix, which takes three nested loops with loop length equal to the number of local degrees of freedom to compute. The multiplication itself is then done by two nested loops, which means that it is much cheaper.

      One way to improve this is to realize that conceptually the local matrix can be thought of as the product of three matrices,

\begin{eqnarray*}
  A_\mathrm{cell} = B_\mathrm{cell}^T D_\mathrm{cell} B_\mathrm{cell},
\end{eqnarray*}

where for the example of the Laplace operator the $(q*\mathrm{dim}+d,i)$-th element of $B_\mathrm{cell}$ is given by fe_values.shape_grad(i,q)[d]. This matrix consists of dim*n_q_points rows and dofs_per_cell columns. The matrix $D_\mathrm{cell}$ is diagonal and contains the values fe_values.JxW(q) * coefficient_values[q] (or, rather, dim copies of each of these values). This kind of representation of finite element matrices can often be found in the engineering literature.

      When the cell matrix is applied to a vector,

\begin{eqnarray*}
  A_\mathrm{cell}\cdot u_\mathrm{cell} = B_\mathrm{cell}^T
  D_\mathrm{cell} B_\mathrm{cell} \cdot u_\mathrm{cell},
\end{eqnarray*}

one would then not form the matrix-matrix products, but rather multiply one matrix at a time with a vector from right to left so that only three successive matrix-vector products are formed. This approach removes the three nested loops in the calculation of the local matrix, which reduces the complexity of the work on one cell from something like $\mathcal{O}(\mathrm{dofs\_per\_cell}^3)$ to $\mathcal{O}(\mathrm{dofs\_per\_cell}^2)$. An interpretation of this algorithm is that we first transform the vector of values on the local DoFs to a vector of gradients on the quadrature points. In the second loop, we multiply these gradients by the integration weight and the coefficient. The third loop applies the second gradient (in transposed form), so that we get back to a vector of (Laplacian) values on the cell dofs.
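In code, the three loops just described might look like the following sketch (assuming fe_values has been reinitialized on the current cell and src_cell/dst_cell hold the local dof values; all names are illustrative):

std::vector<Tensor<1, dim>> quad_gradients(n_q_points);

// Loop 1: local dof values -> gradients at the quadrature points (B u).
for (unsigned int q = 0; q < n_q_points; ++q)
  for (unsigned int i = 0; i < dofs_per_cell; ++i)
    quad_gradients[q] += src_cell(i) * fe_values.shape_grad(i, q);

// Loop 2: multiply by integration weight and coefficient (D (B u)).
for (unsigned int q = 0; q < n_q_points; ++q)
  quad_gradients[q] *= fe_values.JxW(q) * coefficient_values[q];

// Loop 3: test with the gradients, i.e. apply B^T.
for (unsigned int i = 0; i < dofs_per_cell; ++i)
  for (unsigned int q = 0; q < n_q_points; ++q)
    dst_cell(i) += fe_values.shape_grad(i, q) * quad_gradients[q];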

      The bottleneck in the above code is the operations done by the call to FEValues::reinit for every cell, which take about as much time as the other steps together (at least if the mesh is unstructured; deal.II can recognize that the gradients are often unchanged on structured meshes). That is certainly not ideal and we would like to do better than this. What the reinit function does is to calculate the gradient in real space by transforming the gradient on the reference cell using the Jacobian of the transformation from real to reference cell. This is done for each basis function on the cell, for each quadrature point. The Jacobian does not depend on the basis function, but it is different on different quadrature points in general. If you only build the matrix once as we've done in all previous tutorial programs, there is nothing to be optimized since FEValues::reinit needs to be called on every cell. In this process, the transformation is applied while computing the local matrix elements.

      In a matrix-free implementation, however, we will compute those integrals very often because iterative solvers will apply the matrix many times during the solution process. Therefore, we need to think about whether we may be able to cache some data that gets reused in the operator applications, i.e., integral computations. On the other hand, we realize that we must not cache too much data since otherwise we get back to the situation where memory access becomes the dominating factor. Therefore, we will not store the transformed gradients in the matrix B, as they would in general be different for each basis function and each quadrature point on every element for curved meshes.

      The trick is to factor out the Jacobian transformation and first apply the gradient on the reference cell only. This operation interpolates the vector of values on the local dofs to a vector of (unit-coordinate) gradients on the quadrature points. There, we first apply the Jacobian that we factored out from the gradient, then apply the weights of the quadrature, and finally apply the transposed Jacobian for preparing the third loop which tests by the gradients on the unit cell and sums over quadrature points.

Let us again write this in terms of matrices. Let the matrix $B_\mathrm{cell}$ denote the cell-related gradient matrix, with each row containing the values on the quadrature points. It is constructed by a matrix-matrix product as

\begin{eqnarray*}
  B_\mathrm{cell} = J_\mathrm{cell}^{-\mathrm T} B_\mathrm{ref\_cell},
\end{eqnarray*}

where $B_\mathrm{ref\_cell}$ denotes the gradient on the reference cell and $J_\mathrm{cell}^{-\mathrm T}$ denotes the inverse transpose Jacobian of the transformation from unit to real cell (in the language of transformations, the operation represented by $J_\mathrm{cell}^{-\mathrm T}$ represents a covariant transformation). $J_\mathrm{cell}^{-\mathrm T}$ is block-diagonal, and the block size is equal to the dimension of the problem. Each diagonal block is the Jacobian transformation that goes from the reference cell to the real cell.
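Written out for a single shape function $\varphi$, with $\boldsymbol\xi$ denoting unit-cell coordinates and $\hat\varphi$ the shape function on the reference cell, the covariant transformation reads

\[
  \nabla_{\mathbf x}\varphi = J_\mathrm{cell}^{-\mathrm T}\,\nabla_{\boldsymbol\xi}\hat\varphi,
\]

which is exactly why $B_\mathrm{cell}$ factors as $J_\mathrm{cell}^{-\mathrm T} B_\mathrm{ref\_cell}$.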

      Putting things together, we find that

\begin{eqnarray*}
  A_\mathrm{cell} = B_\mathrm{cell}^T D B_\mathrm{cell}
                  = B_\mathrm{ref\_cell}^T J_\mathrm{cell}^{-1}
                    D_\mathrm{cell}
                    J_\mathrm{cell}^{-\mathrm T} B_\mathrm{ref\_cell},
\end{eqnarray*}

      so we calculate the product (starting the local product from the right)

\begin{eqnarray*}
  v_\mathrm{cell} = B_\mathrm{ref\_cell}^T J_\mathrm{cell}^{-1} D J_\mathrm{cell}^{-\mathrm T}
  B_\mathrm{ref\_cell} u_\mathrm{cell}, \quad
  v = \sum_{\mathrm{cell}=1}^{\mathrm{n\_cells}} P_\mathrm{cell,{loc-glob}}^T
  v_\mathrm{cell}.
\end{eqnarray*}

      FEValues<dim> fe_values_reference (fe, quadrature_formula,
Note how we create an additional FEValues object for the reference cell gradients and how we initialize it to the reference cell. The actual derivative data is then applied by the inverse, transposed Jacobians (deal.II calls the Jacobian matrix from real to unit cell inverse_jacobian, as the forward transformation is from unit to real cell). The factor $J_\mathrm{cell}^{-1} D_\mathrm{cell} J_\mathrm{cell}^{-\mathrm T}$ is block-diagonal over quadrature. In this form, one realizes that variable coefficients (possibly expressed through a tensor) and general grid topologies with Jacobian transformations have a similar effect on the coefficient transforming the unit-cell derivatives.

At this point, one might wonder why we store the matrix $J_\mathrm{cell}^{-\mathrm T}$ and the coefficient separately, rather than only the complete factor $J_\mathrm{cell}^{-1} D_\mathrm{cell} J_\mathrm{cell}^{-\mathrm T}$. The latter would use less memory because the tensor is symmetric with six independent values in 3D, whereas for the former we would need nine entries for the inverse transposed Jacobian, one for the quadrature weight and Jacobian determinant, and one for the coefficient, totaling 11 doubles. The reason is that the former approach allows for implementing generic differential operators through a common framework of cached data, whereas the latter specifically stores the coefficient for the Laplacian. In case applications demand it, this specialization could pay off and would be worthwhile to consider. Note that the implementation in deal.II is smart enough to detect Cartesian or affine geometries where the Jacobian is constant throughout the cell and need not be stored for every cell (and indeed often is the same over different cells as well).

The final optimization that is most crucial from an operation count point of view is to make use of the tensor product structure in the basis functions. This is possible because we have factored out the gradient from the reference cell operation described by $B_\mathrm{ref\_cell}$, i.e., an interpolation operation over the completely regular data fields of the reference cell. We illustrate the process of complexity reduction in two space dimensions, but the same technique can be used in higher dimensions. On the reference cell, the basis functions are of the tensor product form $\phi(x,y) = \varphi_i(x) \varphi_j(y)$. The part of the matrix $B_\mathrm{ref\_cell}$ that computes the first component has the form $B_\mathrm{sub\_cell}^x = B_\mathrm{grad,x} \otimes B_\mathrm{val,y}$, where $B_\mathrm{grad,x}$ and $B_\mathrm{val,y}$ contain the evaluation of all the 1D basis functions on all the 1D quadrature points. Forming a matrix $U$ with $U(j,i)$ containing the coefficient belonging to basis function $\varphi_i(x) \varphi_j(y)$, we get $(B_\mathrm{grad,x} \otimes B_\mathrm{val,y})u_\mathrm{cell} = B_\mathrm{val,y} U B_\mathrm{grad,x}$. This reduces the complexity for computing this product from $p^4$ to $2 p^3$, where $p-1$ is the degree of the finite element (i.e., equivalently, $p$ is the number of shape functions in each coordinate direction), or $p^{2d}$ to $d p^{d+1}$ in general. The reason why we look at the complexity in terms of the polynomial degree is that we want to be able to go to high degrees and possibly increase the polynomial degree $p$ instead of the grid resolution. Good algorithms for moderate degrees like the ones used here are linear in the polynomial degree independent of the dimension, as opposed to matrix-based schemes or naive evaluation through FEValues. The techniques used in the implementations of deal.II have been established in the spectral element community since the 1980s.

      Implementing a matrix-free and cell-based finite element operator requires a somewhat different program design as compared to the usual matrix assembly codes shown in previous tutorial programs. The data structures for doing this are the MatrixFree class that collects all data and issues a (parallel) loop over all cells and the FEEvaluation class that evaluates finite element basis functions by making use of the tensor product structure.
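As a minimal sketch of this division of labor (assuming a self-contained LaplaceOperator class that stores a MatrixFree<dim, number> member called data; the actual program derives from MatrixFreeOperators::Base, which arrives at the same structure), the matrix-vector product reduces to a loop issued by MatrixFree, with the per-cell work delegated to a member function:

  template <int dim, int fe_degree, typename number>
  void LaplaceOperator<dim, fe_degree, number>::vmult(
    LinearAlgebra::distributed::Vector<number>       &dst,
    const LinearAlgebra::distributed::Vector<number> &src) const
  {
    dst = 0;
    // MatrixFree schedules the (parallel) traversal of all cells and calls
    // local_apply() on contiguous ranges of cell batches:
    data.cell_loop(&LaplaceOperator::local_apply, this, dst, src);
  }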

      The implementation of the matrix-free matrix-vector product shown in this tutorial is slower than a matrix-vector product using a sparse matrix for linear elements, but faster for all higher order elements thanks to the reduced complexity due to the tensor product structure and due to less memory transfer during computations. The impact of reduced memory transfer is particularly beneficial when working on a multicore processor where several processing units share access to memory. In that case, an algorithm which is computation bound will show almost perfect parallel speedup (apart from possible changes of the processor's clock frequency through turbo modes depending on how many cores are active), whereas an algorithm that is bound by memory transfer might not achieve similar speedup (even when the work is perfectly parallel and one could expect perfect scaling like in sparse matrix-vector products). An additional gain with this implementation is that we do not have to build the sparse matrix itself, which can also be quite expensive depending on the underlying differential equation. Moreover, the above framework is simple to generalize to nonlinear operations, as we demonstrate in step-48.

      Combination with multigrid


To be efficient, the operations performed in the matrix-free implementation require knowledge of loop lengths at compile time, which are given by the degree of the finite element. Hence, we collect the values of the two template parameters that can be changed at one place in the code. Of course, one could make the degree of the finite element a run-time parameter by compiling the computational kernels for all degrees that are likely (say, between 1 and 6) and selecting the appropriate kernel at run time. Here, we simply choose second-order $Q_2$ elements and dimension 3 as the default.


        const unsigned int degree_finite_element = 2;
        const unsigned int dimension = 3;
       
       

      Equation data


We define a variable coefficient function for the Poisson problem. It is similar to the function in step-5 but we use the form $a(\mathbf x)=\frac{1}{0.05 + 2\|\mathbf x\|^2}$ instead of a discontinuous one. It is merely to demonstrate the possibilities of this implementation, rather than making much sense physically. We define the coefficient in the same way as functions in earlier tutorial programs. There is one new function, namely a value method with template argument number.


  template <int dim>
  class Coefficient : public Function<dim>
  {
  public:
    template <typename number>
    number value(const Point<dim, number> &p, const unsigned int component = 0) const;
  };
    • Tell the FEEvaluation object the (macro) cell we want to work on.
  • Read in the values of the source vectors (read_dof_values), including the resolution of constraints. This stores $u_\mathrm{cell}$ as described in the introduction.
  • Compute the unit-cell gradient (the evaluation of finite element functions). Since FEEvaluation can combine value computations with gradient computations, it uses a unified interface to all kinds of derivatives of order between zero and two. We only want gradients, no values and no second derivatives, so we set the function arguments to true in the gradient slot (second slot), and to false in the values slot (first slot). There is also a third slot for the Hessian which is false by default, so it need not be given. Note that the FEEvaluation class internally evaluates shape functions in an efficient way where one dimension is worked on at a time (using the tensor product form of shape functions and quadrature points as mentioned in the introduction). This gives complexity equal to $\mathcal O(d^2 (p+1)^{d+1})$ for polynomial degree $p$ in $d$ dimensions, compared to the naive approach with loops over all local degrees of freedom and quadrature points that is used in FEValues and costs $\mathcal O(d (p+1)^{2d})$.
  • Next comes the application of the Jacobian transformation, the multiplication by the variable coefficient and the quadrature weight. FEEvaluation has an access function get_gradient that applies the Jacobian and returns the gradient in real space. Then, we just need to multiply by the (scalar) coefficient, and let the function submit_gradient apply the second Jacobian (for the test function) and the quadrature weight and Jacobian determinant (JxW). Note that the submitted gradient is stored in the same data field as the one it is read from in get_gradient. Therefore, you need to make sure not to read from the same quadrature point again after having called submit_gradient on that particular quadrature point. In general, it is a good idea to copy the result of get_gradient when it is used more than once.
  • Next follows the summation over quadrature points for all test functions that corresponds to the actual integration step. For the Laplace operator, we just multiply by the gradient, so we call the integrate function with the respective argument set. If you have an equation where you test by both the values of the test functions and the gradients, both template arguments need to be set to true. Calling first the integrate function for values and then gradients in a separate call leads to wrong results, since the second call will internally overwrite the results from the first call. Note that there is no function argument for the second derivative in the integrate step.
  • Eventually, the local contributions in the vector $v_\mathrm{cell}$ as mentioned in the introduction need to be added into the result vector (and constraints are applied). This is done with a call to distribute_local_to_global, the same name as the corresponding function in the AffineConstraints class (only that we now store the local vector in the FEEvaluation object, just as the indices between local and global degrees of freedom). A condensed sketch of these steps follows this list.
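The following sketch condenses the steps above, written with the EvaluationFlags interface of recent deal.II releases rather than the boolean slots described in the list; the coefficient table is an assumption standing in for whatever data the operator caches:

  FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> phi(data);
  for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
    {
      phi.reinit(cell);                         // select the (macro) cell
      phi.read_dof_values(src);                 // gather u_cell, resolve constraints
      phi.evaluate(EvaluationFlags::gradients); // unit-cell gradients only
      for (unsigned int q = 0; q < phi.n_q_points; ++q)
        // get_gradient applies the Jacobian; submit_gradient queues the test
        // function Jacobian and JxW for the subsequent integration:
        phi.submit_gradient(coefficient(cell, q) * phi.get_gradient(q), q);
      phi.integrate(EvaluationFlags::gradients); // test by gradients only
      phi.distribute_local_to_global(dst);       // scatter v_cell, apply constraints
    }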
        template <int dim, int fe_degree, typename number>
        void LaplaceOperator<dim, fe_degree, number>::local_apply(

In the local compute loop, we compute the diagonal by a loop over all columns in the local matrix, putting the entry 1 in the $i$th slot and a zero entry in all other slots, i.e., we apply the cell-wise differential operator on one unit vector at a time. The inner part invoking FEEvaluation::evaluate(), the loop over quadrature points, and FEEvaluation::integrate() is exactly the same as in the local_apply function. Afterwards, we pick out the $i$th entry of the local result and put it into temporary storage (as we overwrite all entries in the array behind FEEvaluation::get_dof_value() with the next loop iteration). Finally, the temporary storage is written to the destination vector. Note how we use FEEvaluation::get_dof_value() and FEEvaluation::submit_dof_value() to read from and write to the data field that FEEvaluation uses both for the integration and for writing into the global vector.
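A condensed sketch of that loop, under the same assumptions as the local_apply sketch above (phi, cell, coefficient, and dst as in that sketch):

  AlignedVector<VectorizedArray<number>> diagonal(phi.dofs_per_cell);
  for (unsigned int i = 0; i < phi.dofs_per_cell; ++i)
    {
      for (unsigned int j = 0; j < phi.dofs_per_cell; ++j)
        phi.submit_dof_value(VectorizedArray<number>(), j); // zero all slots ...
      phi.submit_dof_value(make_vectorized_array<number>(1.), i); // ... except slot i
      phi.evaluate(EvaluationFlags::gradients); // same kernel as local_apply
      for (unsigned int q = 0; q < phi.n_q_points; ++q)
        phi.submit_gradient(coefficient(cell, q) * phi.get_gradient(q), q);
      phi.integrate(EvaluationFlags::gradients);
      diagonal[i] = phi.get_dof_value(i); // keep only the ith entry of this column
    }
  for (unsigned int i = 0; i < phi.dofs_per_cell; ++i)
    phi.submit_dof_value(diagonal[i], i); // write the diagonal back ...
  phi.distribute_local_to_global(dst);    // ... into the global vector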


Given that we are only interested in the matrix diagonal, we simply throw away all other entries of the local matrix that have been computed along the way. While it might seem wasteful to compute the complete cell matrix and then throw away everything but the diagonal, the integration is so efficient that the computation does not take too much time. Note that the complexity of operator evaluation per element is $\mathcal O((p+1)^{d+1})$ for polynomial degree $p$, so computing the whole matrix costs us $\mathcal O((p+1)^{2d+1})$ operations, not too far away from the $\mathcal O((p+1)^{2d})$ complexity for computing the diagonal with FEValues. Since FEEvaluation is also considerably faster due to vectorization and other optimizations, the diagonal computation with this function is actually the fastest (simple) variant. (It would be possible to compute the diagonal with sum factorization techniques in $\mathcal O((p+1)^{d+1})$ operations involving specifically adapted kernels, but since such kernels are only useful in that particular context and the diagonal computation is typically not on the critical path, they have not been implemented in deal.II.)


Note that the code that calls distribute_local_to_global on the vector to accumulate the diagonal entries into the global matrix has some limitations. For operators with hanging node constraints that distribute an integral contribution of a constrained DoF to several other entries inside the distribute_local_to_global call, the vector interface used here does not exactly compute the diagonal entries, but lumps some contributions located on the diagonal of the local matrix that would end up in an off-diagonal position of the global matrix onto the diagonal. The result is correct up to discretization accuracy as explained in Kormann (2016), section 5.3, but not mathematically equal. In this tutorial program, no harm can happen because the diagonal is only used for the multigrid level matrices where no hanging node constraints appear.

        template <int dim, int fe_degree, typename number>
        void LaplaceOperator<dim, fe_degree, number>::local_compute_diagonal(

As a smoother, this tutorial program uses a Chebyshev iteration instead of the SOR iteration used in step-16. (SOR would be very difficult to implement because we do not have the matrix elements available explicitly, and it is difficult to make it work efficiently in parallel.) The smoother is initialized with our level matrices and the mandatory additional data for the Chebyshev smoother. We use a relatively high degree here (5), since matrix-vector products are comparably cheap. We choose to smooth out a range of $[1.2 \hat{\lambda}_{\max}/15,1.2 \hat{\lambda}_{\max}]$ in the smoother where $\hat{\lambda}_{\max}$ is an estimate of the largest eigenvalue (the factor 1.2 is applied inside PreconditionChebyshev). In order to compute that eigenvalue, the Chebyshev initialization performs a few steps of a CG algorithm without preconditioner. Since the highest eigenvalue is usually the easiest one to find and a rough estimate is enough, we choose 10 iterations. Finally, we also set the inner preconditioner type in the Chebyshev method, which is a Jacobi iteration. This is represented by the DiagonalMatrix class that gets the inverse diagonal entries provided by our LaplaceOperator class.


On level zero, we initialize the smoother differently because we want to use the Chebyshev iteration as a solver. PreconditionChebyshev allows the user to switch to solver mode where the number of iterations is internally chosen to the correct value. In the additional data object, this setting is activated by choosing the polynomial degree to be numbers::invalid_unsigned_int. The algorithm will then attack all eigenvalues between the smallest and largest one in the coarse level matrix. The number of steps in the Chebyshev smoother is chosen such that the Chebyshev convergence estimates guarantee to reduce the residual by the number specified in the variable smoothing_range. Note that for solving, smoothing_range is a relative tolerance and chosen smaller than one (in this case, we select three orders of magnitude), whereas it is a number larger than 1 when only selected eigenvalues are smoothed.
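A sketch of the corresponding initialization, following the choices just described (LevelMatrixType and mg_matrices stand for the level operator type and the collection of level matrices of this program):

  using SmootherType =
    PreconditionChebyshev<LevelMatrixType,
                          LinearAlgebra::distributed::Vector<float>>;
  typename SmootherType::AdditionalData smoother_data;
  if (level > 0) // smoother on the refined levels
    {
      smoother_data.smoothing_range     = 15.; // attack [lambda_max/15, lambda_max]
      smoother_data.degree              = 5;   // five matrix-vector products
      smoother_data.eig_cg_n_iterations = 10;  // CG steps for the eigenvalue estimate
    }
  else // level 0: run Chebyshev in solver mode
    {
      smoother_data.smoothing_range     = 1e-3; // relative tolerance
      smoother_data.degree              = numbers::invalid_unsigned_int;
      smoother_data.eig_cg_n_iterations = mg_matrices[0].m();
    }
  smoother_data.preconditioner =
    mg_matrices[level].get_matrix_diagonal_inverse(); // Jacobi inner preconditioner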

From a computational point of view, the Chebyshev iteration is a very attractive coarse grid solver as long as the coarse size is moderate. This is because the Chebyshev method performs only matrix-vector products and vector updates, which typically parallelize better to the largest cluster sizes with more than a few tens of thousands of cores than the inner products involved in other iterative methods. The former involves only local communication between neighbors in the (coarse) mesh, whereas the latter requires global communication over all processors.

        using SmootherType =

      Program output

      Since this example solves the same problem as step-5 (except for a different coefficient), there is little to say about the solution. We show a picture anyway, illustrating the size of the solution through both isocontours and volume rendering:

It is of more interest to evaluate some aspects of the multigrid solver. When we run this program in 2D for quadratic ($Q_2$) elements, we get the following output (when run on one core in release mode):

      Vectorization over 2 doubles = 128 bits (SSE2)
      Cycle 0
      Number of degrees of freedom: 81
      Total setup time (wall) 0.00159788s
...
      Number of degrees of freedom: 2146689
      Total setup time (wall) 4.96491s
      Time solve (6 iterations) (CPU/wall) 3.53126s/3.56142s

Since it is so easy, we look at what happens if we increase the polynomial degree. When we select degree four in 3D, i.e., $\mathcal Q_4$ elements, by changing the line const unsigned int degree_finite_element = 4; at the top of the program, we get the following program output:


      Vectorization over 2 doubles = 128 bits (SSE2)
      Cycle 0
      Number of degrees of freedom: 729
...
      Number of degrees of freedom: 16974593
      Total setup time (wall) 27.8989s
      Time solve (7 iterations) (CPU/wall) 26.3705s/27.1077s

Since $\mathcal Q_4$ elements on a certain mesh correspond to $\mathcal Q_2$ elements on half the mesh size, we can compare the run time at cycle 4 with fourth degree polynomials with cycle 5 using quadratic polynomials, both at 2.1 million degrees of freedom. The surprising effect is that the solver for $\mathcal Q_4$ elements is actually slightly faster than for the quadratic case, despite using one more linear iteration. The effect that higher-degree polynomials are similarly fast or even faster than lower degree ones is one of the main strengths of matrix-free operator evaluation through sum factorization, see the matrix-free paper. This is fundamentally different from matrix-based methods that get more expensive per unknown as the polynomial degree increases and the coupling gets denser.


/usr/share/doc/packages/dealii/doxygen/deal.II/step_38.html differs (HTML document, UTF-8 Unicode text, with very long lines)

This material is based upon work supported by the National Science Foundation under Grant No. DMS-0914977. Any opinions, findings and conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the National Science Foundation (NSF).

      Introduction

In this example, we show how to solve a partial differential equation (PDE) on a codimension one surface $\Gamma \subset \mathbb R^3$ made of quadrilaterals, i.e. on a surface in 3d or a line in 2d. We focus on the following elliptic second order PDE

\begin{align*}
 -\Delta_\Gamma u &= f \qquad \text{on } \Gamma,\\
 u &= g \qquad \text{on } \partial \Gamma,
\end{align*}

which generalizes the Laplace equation we have previously solved in several of the early tutorial programs. Our implementation is based on step-4. step-34 also solves problems on lower dimensional surfaces; however, there we only consider integral equations that do not involve derivatives of the solution variable, while here we actually have to investigate what it means to take derivatives of a function only defined on a (possibly curved) surface.

In order to define the above operator, we start by introducing some notation. Let $\mathbf x_S:\hat S \rightarrow S$ be a parameterization of a surface $S$ from a reference element $\hat S \subset \mathbb R^2$, i.e. each point $\hat{\mathbf x}\in\hat S$ induces a point ${\mathbf x}_S(\hat{\mathbf x}) \in S$. Then let

\[
 G_S \dealcoloneq (D \mathbf{x}_S)^T \ D \mathbf{x}_S
\]

denote the corresponding first fundamental form, where $D \mathbf{x}_S=\left(\frac{\partial x_{S,i}(\hat{\mathbf x})}{\partial \hat x_j}\right)_{ij}$ is the derivative (Jacobian) of the mapping. In the following, $S$ will be either the entire surface $\Gamma$ or, more conveniently for the finite element method, any face $S \in {\mathbb T}$, where ${\mathbb T}$ is a partition (triangulation) of $\Gamma$ made up of quadrilaterals. We are now in a position to define the tangential gradient of a function $v : S \rightarrow \mathbb R$ by

\[
 (\nabla_S v)\circ \mathbf x_S \dealcoloneq D \mathbf x_S \ G_S^{-1} \ \nabla (v \circ \mathbf x_S).
\]

The surface Laplacian (also called the Laplace-Beltrami operator) is then defined as $\Delta_S \dealcoloneq \nabla_S \cdot \nabla_S$. Note that an alternate way to compute the surface gradient on smooth surfaces $\Gamma$ is

\[
 \nabla_S v = \nabla \tilde v - \mathbf n (\mathbf n \cdot \nabla \tilde v),
\]

where $\tilde v$ is a "smooth" extension of $v$ in a tubular neighborhood of $\Gamma$ and $\mathbf n$ is the normal of $\Gamma$. Since $\Delta_S = \nabla_S \cdot \nabla_S$, we deduce

\[
 \Delta_S v = \Delta \tilde v - \mathbf n^T \ D^2 \tilde v \ \mathbf n - (\mathbf n \cdot \nabla \tilde v) (\nabla \cdot \mathbf n - \mathbf n^T \ D \mathbf n \ \mathbf n ).
\]

It is worth mentioning that the term $\nabla \cdot \mathbf n - \mathbf n^T \ D \mathbf n \ \mathbf n$ appearing in the above expression is the total curvature of the surface (the sum of the principal curvatures).

As usual, we are only interested in weak solutions for which we can use $C^0$ finite elements (rather than requiring $C^1$ continuity as for strong solutions). We therefore resort to the weak formulation

\[
 \int_\Gamma \nabla_\Gamma u \cdot \nabla_\Gamma v = \int_\Gamma f \ v \qquad \forall v \in H^1_0(\Gamma)
\]

and take advantage of the partition ${\mathbb T}$ to further write

\[
 \sum_{K\in {\mathbb T}}\int_K \nabla_{K} u \cdot \nabla_{K} v = \sum_{K\in {\mathbb T}} \int_K f \ v \qquad \forall v \in H^1_0(\Gamma).
\]

Moreover, each integral in the above expression is computed in the reference element $\hat K \dealcoloneq [0,1]^2$ so that

\begin{align*}
 \int_{K} \nabla_{K} u \cdot \nabla_{K} v
 &=
 \int_{\hat K} \nabla (u \circ \mathbf x_K)^T G_K^{-1} (D \mathbf x_K)^T D \mathbf x_K \ G_K^{-1} \nabla (v \circ \mathbf x_K) \sqrt{\det (G_K)}
 \\
 &=
 \int_{\hat K} \nabla (u \circ \mathbf x_K)^T G_K^{-1} \nabla (v \circ \mathbf x_K) \sqrt{\det (G_K)}
\end{align*}

      and

\[
 \int_{K} f \ v = \int_{\hat K} (f \circ \mathbf x_K) (v \circ \mathbf x_K) \sqrt{\det (G_K)}.
\]

Finally, we use a quadrature formula defined by points $\{p_l\}_{l=1}^N\subset \hat K$ and weights $\{w_l\}_{l=1}^N \subset \mathbb R^+_*$ to evaluate the above integrals and obtain

\[
 \int_{K} \nabla_{K} u \cdot \nabla_{K} v \approx \sum_{l=1}^N (\nabla (u \circ \mathbf x_K)(p_l))^T G^{-1}(p_l) \nabla (v \circ \mathbf x_K)(p_l) \sqrt{\det (G(p_l))} \ w_l
\]

      and

\[
 \int_{K} f \ v \approx \sum_{l=1}^N (f \circ \mathbf x_K)(p_l) \ (v \circ \mathbf x_K)(p_l) \sqrt{\det (G(p_l))} \ w_l.
\]

Fortunately, deal.II already has all the tools to compute the above expressions. In fact, they barely differ from the ways in which we solve the usual Laplacian, only requiring the surface coordinate mapping to be provided in the constructor of the FEValues class. Given the surface description, in the codimension one case the two routines we need are the following:

  • FEValues::shape_grad(i,l), which returns $D \mathbf x_K(p_l) G^{-1}(p_l)D(\varphi_i \circ \mathbf x_K)$
  • FEValues::JxW(l), which returns $\sqrt{\det (G(p_l))} \ w_l$. This provides exactly the terms we need for our computations. A sketch of the resulting assembly loop follows this list.
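As a minimal sketch (assuming the fe, mapping, and dof_handler objects this program builds on a codimension one triangulation, and the usual FullMatrix<double> local matrix cell_matrix), the assembly loop is the standard Laplace loop, and FEValues supplies the two surface-specific ingredients:

  const QGauss<spacedim - 1>       quadrature(fe.degree + 1);
  FEValues<spacedim - 1, spacedim> fe_values(mapping, fe, quadrature,
                                             update_gradients | update_JxW_values);
  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      fe_values.reinit(cell);
      cell_matrix = 0;
      for (const unsigned int q : fe_values.quadrature_point_indices())
        for (const unsigned int i : fe_values.dof_indices())
          for (const unsigned int j : fe_values.dof_indices())
            cell_matrix(i, j) += fe_values.shape_grad(i, q) * // surface gradients
                                 fe_values.shape_grad(j, q) *
                                 fe_values.JxW(q);            // sqrt(det G) w_l
    }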

      On a more general note, details for the finite element approximation on surfaces can be found for instance in [Dziuk, in Partial differential equations and calculus of variations 1357, Lecture Notes in Math., 1988], [Demlow, SIAM J. Numer. Anal. 47(2), 2009] and [Bonito, Nochetto, and Pauletti, SIAM J. Numer. Anal. 48(5), 2010].

      Testcase


      We produce one test case for a 2d problem and another one for 3d:

  • In 2d, let's choose as domain a half circle. On this domain, we choose the function $u(\mathbf x)=-2x_1x_2$ as the solution. To compute the right hand side, we have to compute the surface Laplacian of the solution function. There are (at least) two ways to do that. The first one is to project away the normal derivative as described above using the natural extension of $u(\mathbf x)$ (still denoted by $u$) over $\mathbb R^d$, i.e. to compute

    \[
     \Delta_\Gamma u = \Delta u - \mathbf n^T \ D^2 u \ \mathbf n - (\mathbf n \cdot \nabla u)\ \kappa,
    \]

  where $\kappa$ is the total curvature of $\Gamma$. Since we are on the unit circle, $\mathbf n=\mathbf x$ and $\kappa = 1$ so that

    \[
     -\Delta_\Gamma u = -8 x_1x_2.
    \]

  A somewhat simpler way, at least for the current case of a curve in two-dimensional space, is to note that we can map the interval $t \in [0,\pi]$ onto the domain $\Omega$ using the transformation $\mathbf x(t)= \left(\begin{array}{c} \cos t \\ \sin t \end{array}\right)$. At position $\mathbf x=\mathbf x(t)$, the value of the solution is then $u(\mathbf x(t)) = -2\cos t \sin t$. Taking into account that the transformation is length preserving, i.e. a segment of length $dt$ is mapped onto a piece of curve of exactly the same length, the tangential Laplacian then satisfies

    \begin{align*}
     \Delta_\Gamma u
     &= \frac{d^2}{dt^2}(-2\cos t \sin t)
     = -2 \frac{d}{dt}(-\sin^2 t + \cos^2 t)
     = -2 \frac{d}{dt} \cos 2t
     \\
     &= 4 \sin 2t
     = 8 \sin t \cos t
     \\
     &= 8 x_1x_2,
    \end{align*}

        which is of course the same result as we had above.

  • In 3d, the domain is again half of the surface of the unit ball, i.e. a half sphere or dome. We choose $u(\mathbf x)=-2\sin(\pi x_1)\cos(\pi x_2)e^{x_3}$ as the solution. We can compute the right hand side of the equation, $f=-\Delta_\Gamma u$, in the same way as the method above (with $\kappa = 2$), yielding an awkward and lengthy expression. You can find the full expression in the source code.
      -

      In the program, we will also compute the $H^1$ seminorm error of the solution. Since the solution function and its numerical approximation are only defined on the manifold, the obvious definition of this error functional is $| e |_{H^1(\Gamma)}
+<p>In the program, we will also compute the <picture><source srcset=$H^1$ seminorm error of the solution. Since the solution function and its numerical approximation are only defined on the manifold, the obvious definition of this error functional is $| e |_{H^1(\Gamma)}
   = | \nabla_\Gamma e |_{L_2(\Gamma)}
-  = \left( \int_\Gamma | \nabla_\Gamma (u-u_h) |^2 \right)^{1/2}$. This requires us to provide the tangential gradient $\nabla_\Gamma u$ to the function VectorTools::integrate_difference (first introduced in step-7), which we will do by implementing the function Solution::gradient in the program below.

      + = \left( \int_\Gamma | \nabla_\Gamma (u-u_h) |^2 \right)^{1/2}$" src="form_4811.png"/>. This requires us to provide the tangential gradient $\nabla_\Gamma u$ to the function VectorTools::integrate_difference (first introduced in step-7), which we will do by implementing the function Solution::gradient in the program below.

      Implementation

      If you've read through step-4 and understand the discussion above of how solution and right hand side correspond to each other, you will be immediately familiar with this program as well. In fact, there are only two things that are of significance:


        LaplaceBeltramiProblem::assemble_system

The following is the central function of this program, assembling the matrix that corresponds to the surface Laplacian (Laplace-Beltrami operator). Maybe surprisingly, it actually looks exactly the same as for the regular Laplace operator discussed in, for example, step-4. The key is that the FEValues::shape_grad() function does the magic: It returns the surface gradient $\nabla_K \phi_i(x_q)$ of the $i$th shape function at the $q$th quadrature point. The rest then does not need any changes either:

          template <int spacedim>
          void LaplaceBeltramiProblem<spacedim>::assemble_system()
          {

Note that the only essential addition is the line marked with asterisks. It is worth pointing out one other thing here, though: because we detach the manifold description from the surface mesh, whenever we use a mapping object in the rest of the program, it has no curved boundary description to go on any more. Rather, it will have to use the implicit FlatManifold class that is used on all parts of the domain not explicitly assigned a different manifold object. Consequently, whether we use MappingQ(2), MappingQ(15) or MappingQ1, each cell of our mesh will be mapped using a bilinear approximation.

All these drawbacks aside, the resulting pictures are still pretty. The only other difference to what's in step-38 is that we changed the right hand side to $f(\mathbf x)=\sin x_3$ and the boundary values (through the Solution class) to $u(\mathbf x)|_{\partial\Omega}=\cos x_3$. Of course, we now no longer know the exact solution, so the computation of the error at the end of LaplaceBeltrami::run will yield a meaningless number.

        The plain program

        /* ------------------------------------------------------------------------
        *
/usr/share/doc/packages/dealii/doxygen/deal.II/step_39.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    In this program, we use the interior penalty method and Nitsche's weak boundary conditions to solve Poisson's equation. We use multigrid methods on locally refined meshes, which are generated using a bulk criterion and a standard error estimator based on cell and face residuals. All operators are implemented using the MeshWorker interface.

Like in step-12, the discretization relies on finite element spaces, which are polynomial inside the mesh cells $K\in \mathbb T_h$, but have no continuity between cells. Since such functions have two values on each interior face $F\in \mathbb F_h^i$, one from each side, we define mean value and jump operators as follows: let $K_1$ and $K_2$ be the two cells sharing a face, and let the traces of functions $u_i$ and the outer normal vectors $\mathbf n_i$ be labeled accordingly. Then, on the face, we let

\[
 \average{ u } = \frac{u_1 + u_2}2
\]

Note that if such an expression contains a normal vector, the averaging operator turns into a jump. The interior penalty method for the problem

\[
 -\Delta u = f \text{ in }\Omega \qquad u = u^D \text{ on } \partial\Omega
\]

    becomes

\begin{multline*}
 \sum_{K\in \mathbb T_h} (\nabla u, \nabla v)_K
 \\
 + \sum_{F \in F_h^i} \biggl\{4\sigma_F (\average{ u \mathbf n}, \average{ v \mathbf n })_F
 - 2 (\average{ \partial_n u},\average{ v \mathbf n })_F
 - 2 (\average{ \partial_n v},\average{ u \mathbf n })_F
 \biggr\}
 \\
 + \sum_{F \in F_h^b} \biggl\{2\sigma_F (u,v)_F
 - (\partial_n u,v)_F
 - (\partial_n v,u)_F
 \biggr\}
 \\
 = (f, v)_\Omega + \sum_{F \in F_h^b} \biggl\{
 2\sigma_F (u^D, v)_F - (\partial_n v,u^D)_F
 \biggr\}.
\end{multline*}

Here, $\sigma_F$ is the penalty parameter, which is chosen as follows: for a face $F$ of a cell $K$, compute the value

\[
 \sigma_{F,K} = p(p+1) \frac{|F|_{d-1}}{|K|_d},
\]

where $p$ is the polynomial degree of the finite element functions and $|\cdot|_d$ and $|\cdot|_{d-1}$ denote the $d$ and $d-1$ dimensional Hausdorff measure of the corresponding object. If the face is at the boundary, choose $\sigma_F = \sigma_{F,K}$. For an interior face, we take the average of the two values at this face.

    In our finite element program, we distinguish three different integrals, corresponding to the sums over cells, interior faces and boundary faces above. Since the MeshWorker::loop organizes the sums for us, we only need to implement the integrals over each mesh element. The class MatrixIntegrator below has these three functions for the left hand side of the formula, the class RHSIntegrator for the right.

    As we will see below, even the error estimate is of the same structure, since it can be written as

\begin{align*}
 \eta^2 &= \eta_K^2 + \eta_F^2 + \eta_B^2
 \\
 \eta_K^2 &= \sum_{K\in \mathbb T_h} h^2 \|f + \Delta u_h\|^2
 \\
 \eta_F^2 &= \sum_{F \in F_h^i} \biggl\{
 4 \sigma_F \| \average{u_h\mathbf n} \|^2 + h \|\average{\partial_n u_h}\|^2 \biggr\}
 \\
 \eta_B^2 &= \sum_{F \in F_h^b} 2\sigma_F \| u_h-u^D \|^2.
\end{align*}

Thus, the functions for assembling matrices, right hand side and error estimates below show that these loops are all generic and can be programmed in the same way.

    This program is related to step-12, in that it uses MeshWorker and discontinuous Galerkin methods. There we solved an advection problem, while here it is a diffusion problem. Here, we also use multigrid preconditioning and a theoretically justified error estimator, see Karakashian and Pascal (2003). The multilevel scheme was discussed in detail in Kanschat (2004). The adaptive iteration and its convergence have been discussed (for triangular meshes) in Hoppe, Kanschat, and Warburton (2009).


The MeshWorker::loop() function separates what needs to be done for local integration from the loops over cells and faces. It does this by calling functions that integrate over a cell, a boundary face, or an interior face, letting them create the local contributions, and then in a separate step calling a function that moves these local contributions into the global objects. We will use this approach for computing the matrices, the right hand side, the error estimator, and the actual error computation in the functions below. For each of these operations, we provide a namespace that contains a set of functions for cell, boundary, and interior face contributions.

All the information needed for this local integration is provided by MeshWorker::DoFInfo<dim> and MeshWorker::IntegrationInfo<dim>. In each case, the functions' signatures are fixed: MeshWorker::loop() wants to call functions with a specific set of arguments, so the signature of the functions cannot be changed.

    The first namespace defining local integrators is responsible for assembling the global matrix as well as the level matrices. On each cell, we integrate the Dirichlet form as well as the Nitsche boundary conditions and the interior penalty fluxes between cells.

The boundary and flux terms need a penalty parameter, which should be adjusted to the cell size and the polynomial degree. We compute it in two steps: First, we compute on each cell $K_i$ the value $P_i = p_i(p_i+1)/h_i$, where $p_i$ is the polynomial degree on cell $K_i$ and $h_i$ is the length of $K_i$ orthogonal to the current face. Second, if exactly one of the two cells adjacent to the face has children, its penalty is multiplied by two (to account for the fact that the mesh size $h_i$ there is only half that previously computed); it is possible that both adjacent cells are refined, in which case we are integrating over a non-active face and no adjustment is necessary. Finally, we return the average of the two penalty values. A small sketch of this computation follows below.
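A small sketch of the two-step computation just described (the arguments are illustrative stand-ins for the cell information MeshWorker provides, not a specific deal.II interface):

  double ip_penalty(const double       h1, // extent of cell 1 orthogonal to the face
                    const double       h2, // extent of cell 2
                    const unsigned int p1, // polynomial degree on cell 1
                    const unsigned int p2, // polynomial degree on cell 2
                    const bool         cell2_is_refined)
  {
    double penalty1 = p1 * (p1 + 1.0) / h1; // P_1 = p(p+1)/h
    double penalty2 = p2 * (p2 + 1.0) / h2; // P_2
    if (cell2_is_refined)
      penalty2 *= 2.; // mesh size on the refined side is only half as large
    return (penalty1 + penalty2) / 2.; // average of the two values
  }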

      namespace MatrixIntegrator
      {
      template <int dim>

Finally, we have an integrator for the error. Since the energy norm for discontinuous Galerkin problems not only involves the difference of the gradient inside the cells, but also jump terms across faces and at the boundary, we cannot just use VectorTools::integrate_difference(). Instead, we use the MeshWorker interface to compute the error ourselves.

    There are several different ways to define this energy norm, but all of them are equivalent to each other uniformly with mesh size (some not uniformly with polynomial degree). Here, we choose

\[
 \|u\|_{1,h} = \sum_{K\in \mathbb T_h} \|\nabla u\|_K^2 + \sum_{F \in F_h^i} 4\sigma_F\|\average{ u \mathbf n}\|^2_F + \sum_{F \in F_h^b} 2\sigma_F\|u\|^2_F
\]

    Below, the first function is, as always, the integration on cells. There is currently no good interface in MeshWorker that would allow us to access values of regular functions in the quadrature points. Thus, we have to create the vectors for the exact function's values and gradients inside the cell integrator. After that, everything is as before and we just add up the squares of the differences.

In addition to computing the error in the energy norm, we use the capability of the mesh worker to compute two functionals at the same time and compute the $L^2$-error in the same loop. Obviously, this one does not have any jump terms and only appears in the integration on cells.

/usr/share/doc/packages/dealii/doxygen/deal.II/step_4.html differs (HTML document, UTF-8 Unicode text, with very long lines)

but I don't actually know any such function with this name and these arguments."

    But back to the concrete case here: For this tutorial, we choose as right hand side the function $4(x^4+y^4)$ in 2d, or $4(x^4+y^4+z^4)$ in 3d. We could write this distinction using an if-statement on the space dimension, but here is a simple way that also allows us to use the same function in 1d (or in 4D, if you should desire to do so), by using a short loop. Fortunately, the compiler knows the size of the loop at compile time (remember that at the time when you define the template, the compiler doesn't know the value of dim, but when it later encounters a statement or declaration RightHandSide<2>, it will take the template, replace all occurrences of dim by 2 and compile the resulting function). In other words, at the time of compiling this function, the number of times the body will be executed is known, and the compiler can minimize the overhead needed for the loop; the result will be as fast as if we had used the formulas above right away.

The last thing to note is that a Point<dim> denotes a point in dim-dimensional space, and its individual components (i.e. $x$, $y$, ... coordinates) can be accessed using the () operator (in fact, the [] operator will work just as well) with indices starting at zero as usual in C and C++.

  template <int dim>
  double RightHandSide<dim>::value(const Point<dim> &p,
                                   const unsigned int /*component*/) const
  {
    double return_value = 0.0;
    for (unsigned int i = 0; i < dim; ++i) // loop length known at compile time
      return_value += 4.0 * std::pow(p[i], 4.0);
    return return_value;
  }
Further down, in the assembly loop discussed next, the right hand side receives the corresponding contribution at each quadrature point:

  cell_rhs(i) += (fe_values.shape_value(i, q_index) * // phi_i(x_q)
                  right_hand_side.value(x_q) *        // f(x_q)
                  fe_values.JxW(q_index));            // dx
  }
As a final remark to these loops: when we assemble the local contributions into cell_matrix(i,j), we have to multiply the gradients of shape functions $i$ and $j$ at point number q_index and then multiply the result with the scalar weight JxW. This is what actually happens: fe_values.shape_grad(i,q_index) returns a dim dimensional vector, represented by a Tensor<1,dim> object, and the operator* that multiplies it with the result of fe_values.shape_grad(j,q_index) makes sure that the dim components of the two vectors are properly contracted, and the result is a scalar floating point number that then is multiplied with the weights. Internally, this operator* makes sure that this happens correctly for all dim components of the vectors, whether dim be 2, 3, or any other space dimension; from a user's perspective, this is not something worth bothering with, however, making things a lot simpler if one wants to write code dimension independently.

    With the local systems assembled, the transfer into the global matrix and right hand side is done exactly as before, but here we have again merged some loops for efficiency:

      cell->get_dof_indices(local_dof_indices);
      for (const unsigned int i : fe_values.dof_indices())
Note
A final remark on visualization: the idea of visualization is to give insight, which is not the same as displaying information. In particular, it is easy to overload a picture with information, but while it shows more information it makes it also more difficult to glean insight. As an example, the program I used to generate these pictures, VisIt, by default puts tick marks on every axis, puts a big fat label "X Axis" on the $x$ axis and similar for the other axes, shows the file name from which the data was taken in the top left and the name of the user doing so and the time and date on the bottom right. None of this is important here: the axes are equally easy to make out because the tripod at the bottom left is still visible, and we know from the program that the domain is $[-1,1]^3$, so there is no need for tick marks. As a consequence, I have switched off all the extraneous stuff in the picture: the art of visualization is to reduce the picture to those parts that are important to see what one wants to see, but no more.

    Postprocessing: What to do with the solution?

    This tutorial – like most of the other programs – principally only shows how to numerically approximate the solution of a partial differential equation, and then how to visualize this solution graphically. But solving a PDE is of course not the goal in most practical applications (unless you are a numerical methods developer and the method is the goal): We generally want to solve a PDE because we want to extract information from it. Examples for what people are interested in from solutions include the following:

    • Let's say you solve the equations of elasticity (which we will do in step-8), then that's presumably because you want to know about the deformation of an elastic object under a given load. From an engineering perspective, what you then presumably want to learn is the degree of deformation of the object, say at a specific point; or you may want to know the maximum stress in order to determine whether the applied load exceeds the safe maximal stress the material can withstand.
• Likewise, for most other equations, the quantity one is actually after is some functional of the solution rather than the solution itself – and such functionals are very often integrals over the domain or the boundary.

Integrals of this kind are evaluated the same way the integrals during assembly are: by quadrature, summing over all cells $K$ and over the quadrature points $q$ of each cell. The mean value of the solution (up to normalization by the volume of the domain), for example, is approximated by

\begin{align*}
  \sum_K \sum_q u_h(\mathbf x_q^K) w_q^K,
\end{align*}
      where $w_q^K$ is the weight of the $q$th quadrature point evaluated on cell $K$. All of this is as always provided by the FEValues class – the entry point for all integrals in deal.II.


The actual implementation of this is straightforward once you know how to get the values of the solution $u$ at the quadrature points of a cell. This functionality is provided by FEValues::get_function_values(), a function that takes a global vector of nodal values as input and returns a vector of function values at the quadrature points of the current cell. To see how it all works together, you can place the following code snippet, which uses this function, anywhere in the program after the solution has been computed (the output_results() function seems like a good place to also do postprocessing, for example):

      QGauss<dim> quadrature_formula(fe.degree + 1);
      FEValues<dim> fe_values(fe,
      quadrature_formula,
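                              update_values | update_JxW_values);
      // (The snippet is cut off here by the comparison; a plausible
      //  continuation, summing u_h(x_q) w_q over all cells. Whether the
      //  tutorial normalizes by the domain volume is an assumption:)
      std::vector<double> solution_values(quadrature_formula.size());
      double mean_value = 0;
      for (const auto &cell : dof_handler.active_cell_iterators())
        {
          fe_values.reinit(cell);
          fe_values.get_function_values(solution, solution_values);
          for (const unsigned int q : fe_values.quadrature_point_indices())
            mean_value += solution_values[q] * fe_values.JxW(q);
        }
      mean_value /= GridTools::volume(triangulation);
      std::cout << "Mean value of u=" << mean_value << std::endl;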
The flux through the boundary is computed analogously: the integral $\int_{\partial\Omega} \nabla u_h \cdot \mathbf n\ \textrm{d}s$ becomes a sum over all boundary faces of all cells,

\begin{align*}
  \sum_K \sum_{f\subset\partial K,\ f\subset\partial\Omega} \sum_q \nabla u_h(\mathbf x_q^f) \cdot \mathbf n(\mathbf x_q^f) w_q^f,
\end{align*}

      where now $\mathbf x_q^f$ are the quadrature points located on face $f$, and $w_q^f$ are the weights associated with these faces. The second of the sum symbols loops over all faces of cell $K$, but restricted to those that are actually at the boundary.


All of this is easily implemented by the following code, which replaces the use of the FEValues class (used for integrating over cells – i.e., domain integrals) by the FEFaceValues class (used for integrating over faces – i.e., boundary integrals):

      QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
      FEFaceValues<dim> fe_face_values(fe,
      face_quadrature_formula,
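                                       update_gradients | update_normal_vectors |
                                         update_JxW_values);
      // (Cut off here as well; a plausible continuation that sums the
      //  normal component of the solution gradient over boundary faces:)
      std::vector<Tensor<1, dim>> gradients(face_quadrature_formula.size());
      double flux = 0;
      for (const auto &cell : dof_handler.active_cell_iterators())
        for (const auto &face : cell->face_iterators())
          if (face->at_boundary())
            {
              fe_face_values.reinit(cell, face);
              fe_face_values.get_function_gradients(solution, gradients);
              for (const unsigned int q :
                   fe_face_values.quadrature_point_indices())
                flux += gradients[q] * fe_face_values.normal_vector(q) *
                        fe_face_values.JxW(q);
            }
      std::cout << "Flux=" << flux << std::endl;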

      Possibilities for extensions

There are many ways in which one can play with this program. The simpler ones include essentially all the possibilities already discussed in the Possibilities for extensions section in the documentation of step-3, except that you will have to think about whether something now also applies to the 3d case discussed in the current program.

It is also worthwhile considering the postprocessing options discussed above. We have stated two numbers there (the mean value and the normal flux) for both the 2d and 3d cases. Can we trust these numbers? We have convinced ourselves that at least the mean value is reasonable, and that the sign of the flux is probably correct. But are these numbers accurate?


      A general rule is that we should never trust a number unless we have verified it in some way. From the theory of finite element methods, we know that as we make the mesh finer and finer, the numerical solution $u_h$ we compute here must converge to the exact solution $u$. As a consequence, we also expect that $\bar u_h \rightarrow \bar u$ and $\Phi_h \rightarrow \Phi$, but that does not mean that for any given mesh $\bar u_h$ or $\Phi_h$ are particularly accurate approximations.


      To test this kind of thing, we have already considered the convergence of a point value in step-3. We can do the same here by selecting how many times the mesh is globally refined in the make_grid() function of this program. For the mean value of the solution, we then get the following numbers:


So this is not great. For the 2d case, we might infer that a value around -6.4 is probably right if we just refine the mesh enough – though 11 refinements already lead to some 4,194,304 cells. In any case, the first number (the one shown in the beginning where we discussed postprocessing) was off by almost a factor of 2!

      For the 3d case, the last number shown was on a mesh with 2,097,152 cells; the next one would have had 8 times as many cells. In any case, the numbers mean that we can't even be sure that the first digit of that last number is correct! In other words, it was worth checking, or we would have just believed all of these numbers. In fact, that last column isn't even doing a particularly good job convincing us that the code might be correctly implemented.

If you keep reading through the other tutorial programs, you will find many ways to make these sorts of computations more accurate and to come to believe that the flux actually does converge to its correct value. For example, we can dramatically increase the accuracy of the computation by using adaptive mesh refinement (step-6) near the boundary, and in particular by using higher polynomial degree finite elements (also step-6, but also step-7). Using the latter with cubic elements (polynomial degree 3), we can actually compute the flux pretty accurately even in 3d: $\Phi_h=-19.0148$ with 4 global refinement steps, and $\Phi_h=-19.1533$ with 5 refinement steps. These numbers are already pretty close together and give us a reasonable idea of the first two correct digits of the "true" answer.

Note
We would be remiss not to comment on the fact that there are good theoretical reasons why computing the flux accurately appears to be so much more difficult than the average value. This has to do with the fact that finite element theory provides us with the estimate $\|u-u_h\|_{L_2(\Omega)} \le C h^2 \|\nabla^2u\|_{L_2(\Omega)}$ when using the linear elements this program uses – that is, for every global mesh refinement, $h$ is reduced by a factor of two and the error goes down by a factor of 4. Now, the $L_2$ error is not equivalent to the error in the mean value, but the two are related: They are both integrals over the domain, using the value of the solution. We expect the mean value to converge no worse than the $L_2$ norm of the error. At the same time, theory also provides us with this estimate: $\|\nabla (u-u_h)\|_{L_2(\partial\Omega)} \le C h^{1/2} \|\nabla^2u\|_{L_2(\Omega)}$. The move from values to gradients reduces the convergence rates by one order, and the move from domain to boundary by another half order. Here, then, each refinement step reduces the error not by a factor of 4 any more, but only by a factor of $\sqrt{2} \approx 1.4$. It takes a lot of global refinement steps to reduce the error by, say, a factor of ten or a hundred, and this is reflected in the very slow convergence evidenced by the table. On the other hand, for cubic elements (i.e., polynomial degree 3), we would get $\|u-u_h\|_{L_2(\Omega)} \le C h^4 \|\nabla^4u\|_{L_2(\Omega)}$ and after reduction by 1.5 orders, we would still have $\|\nabla (u-u_h)\|_{L_2(\partial\Omega)} \le C h^{2+1/2} \|\nabla^4u\|_{L_2(\Omega)}$. This rate, ${\cal O}(h^{2.5})$, is still quite rapid, and it is perhaps not surprising that we get much better answers with these higher order elements. This also illustrates that when trying to approximate anything that relates to a gradient of the solution, using linear elements (polynomial degree one) is really not a good choice at all.
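
To get a feeling for what these rates mean in practice, consider how much work each one requires to gain two digits of accuracy: at ${\cal O}(h^{1/2})$, reducing the error by a factor of 100 requires reducing $h$ by a factor of $100^2=10{,}000 \approx 2^{13.3}$, i.e., some 13 or 14 global refinement steps (and an utterly infeasible number of cells in 3d); at ${\cal O}(h^{2.5})$, each refinement step reduces the error by $2^{2.5}\approx 5.7$, so three steps already suffice.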
/usr/share/doc/packages/dealii/doxygen/deal.II/step_40.html differs (HTML document, UTF-8 Unicode text, with very long lines)

      A general overview of how this parallelization happens is described in the Parallel computing with multiple processors using distributed memory documentation topic. You should read it for a top-level overview before reading through the source code of this program. A concise discussion of many terms we will use in the program is also provided in the Distributed Computing paper. It is probably worthwhile reading it for background information on how things work internally in this program.

      Linear algebra

      step-17 and step-18 already used parallel linear algebra classes, but since the current program is the first one that really covers parallel computing, it is probably worth giving a broad overview of parallel linear algebra here as well.


First, the general mantra mentioned above was that everything has to be distributed. It does not scale if one process (or in fact all processes) has to keep a complete triangulation or even a substantial share of it; it all only works if every one of the $N$ processes in the parallel universe keeps at most a small multiple of one $N$th of the triangulation. Similarly, each process can only hold a small multiple of one $N$th of each solution or right hand side vector, and of the system matrix.


To this end, deal.II has acquired interfaces to a number of packages that provide these kinds of distributed linear algebra data structures. More specifically, deal.II comes with a number of "sub-packages" that all provide vector, matrix, and linear solver classes that are typically named the same or very similarly, but live in different namespaces:

• deal.II's own linear algebra classes. These are what we have been using in step-1 to step-6, for example, along with most of the other programs in the deal.II tutorial. None of these classes is parallel in the sense of using MPI: they cannot subdivide the data among processes, nor work on data spread across processes that cannot access each other's memory directly. (On the other hand, many of these classes actually use multiple threads internally, to use the multiple processor cores available on today's laptops and work stations.) These classes reside in the top-level namespace dealii.
      • Interfaces to the PETSc library's implementations of linear algebra functionality. These are found in namespace PETScWrappers. PETSc is a library that has built a large collection of linear algebra, linear solvers, nonlinear solvers, time steppers, and other functionality that runs on some of the largest machines in the world in parallel, using MPI.
• Interfaces to the Trilinos library, found in namespace TrilinosWrappers. Like PETSc, Trilinos provides parallel vector, matrix, and solver classes built on MPI; this is the set of wrappers that, for example, step-42 below uses.
         

The following, however, will be new or be used in new roles. Let's walk through them. The first of these will provide the tools of the Utilities::System namespace that we will use to query things like the number of processors associated with the current MPI universe, or the rank that the processor this job runs on has within this universe:

  #include <deal.II/base/utilities.h>
          #include <deal.II/base/conditional_ostream.h>

After these preliminaries, here is where it becomes more interesting. As mentioned in the Parallel computing with multiple processors using distributed memory topic, one of the fundamental truths of solving problems on large numbers of processors is that there is no way for any processor to store everything (e.g. information about all cells in the mesh, all degrees of freedom, or the values of all elements of the solution vector). Rather, every processor will own a few of each of these and, if necessary, may know about a few more, for example the ones that are located on cells adjacent to the ones this processor owns itself. We typically call the latter ghost cells, ghost nodes or ghost elements of a vector. The point of this discussion here is that we need to have a way to indicate which elements a particular processor owns or needs to know of. This is the realm of the IndexSet class: if there are a total of $N$ cells, degrees of freedom, or vector elements, associated with (non-negative) integral indices $[0,N)$, then both the set of elements the current processor owns as well as the (possibly larger) set of indices it needs to know about are subsets of the set $[0,N)$. IndexSet is a class that stores subsets of this set in an efficient format:

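As a small illustration of how this class is used (a generic sketch, not code from the tutorial; the sizes and ranges are made up):

        IndexSet locally_owned(1000);      // subsets of [0,1000)
        locally_owned.add_range(100, 200); // this process owns indices [100,200)
        locally_owned.add_index(42);       // plus one isolated index

        // Membership and cardinality queries are cheap:
        const bool owns_150 = locally_owned.is_element(150); // true
        const auto n_owned  = locally_owned.n_elements();    // 101
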

          The next include file declares, among other things, the function SparsityTools::distribute_sparsity_pattern. The role of this function will be explained below.

          #include <deal.II/lac/sparsity_tools.h>

The final two new header files are both related to distributed meshes: the first provides the class parallel::distributed::Triangulation, which describes meshes distributed across a potentially very large number of processors, while the second provides the namespace parallel::distributed::GridRefinement, which offers functions that can adaptively refine such distributed meshes:
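
The two include statements themselves are cut off in this comparison; they are presumably the usual ones for these classes:

          #include <deal.II/distributed/tria.h>
          #include <deal.II/distributed/grid_refinement.h>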

/usr/share/doc/packages/dealii/doxygen/deal.II/step_41.html differs (HTML document, UTF-8 Unicode text, with very long lines)

The classical formulation of the obstacle problem reads

\begin{align*}
  -\textrm{div}\,\sigma &\geq f & &\quad\text{in } \Omega,\\
  \sigma &= \nabla u & &\quad\text{in } \Omega,\\
  u &= 0 & &\quad\text{on } \partial\Omega,\\
  (-\textrm{div}\,\sigma - f)(u(\mathbf x) - g(\mathbf x)) &= 0 & &\quad\text{in } \Omega,\\
  u(\mathbf x) &\geq g(\mathbf x) & &\quad\text{in } \Omega,
\end{align*}


where $u$ is a scalar valued function that denotes the vertical displacement of the membrane. The first equation is called the equilibrium condition, with a force of areal density $f$. Here, we will consider this force to be gravity. The second one is known as Hooke's law, which says that the stresses $\sigma$ are proportional to the gradient of the displacements $u$ (the proportionality constant, often denoted by $E$, has been set to one here, without loss of generality; if it is constant, it can be put into the right hand side function). At the boundary we have zero Dirichlet conditions. Obviously, the first two equations can be combined to yield $-\Delta u \ge f$.


        Intuitively, gravity acts downward and so $f(\mathbf x)$ is a negative function (we choose $f=-10$ in this program). The first condition then means that the total force acting on the membrane is gravity plus something positive: namely the upward force that the obstacle exerts on the membrane at those places where the two of them are in contact. How big is this additional force? We don't know yet (and neither do we know "where" it actually acts) but it must be so that the membrane doesn't penetrate the obstacle.

The fourth equality above together with the last inequality forms the obstacle condition, which has to hold at every point of the whole domain. The latter of these two means that the membrane must be above the obstacle $g(\mathbf x)$ everywhere. The second to last equation, often called the "complementarity condition", says that where the membrane is not in contact with the obstacle (i.e., those $\mathbf x$ where $u(\mathbf x) - g(\mathbf x) \neq 0$), then $-\Delta u=f$ at these locations; in other words, no additional forces act there, as expected. On the other hand, where $u=g$ we can have $-\Delta u-f \neq 0$, i.e., there can be additional forces (though there don't have to be: it is possible for the membrane to just touch, not press against, the obstacle).

        Derivation of the variational inequality

        An obvious way to obtain the variational formulation of the obstacle problem is to consider the total potential energy:

\begin{equation*}
  E(u) \dealcoloneq \dfrac{1}{2}\int_\Omega \nabla u \cdot \nabla u\ \textrm{d}x - \int_\Omega fu\ \textrm{d}x,
\end{equation*}

which we seek to minimize over the convex set of admissible displacements,

\begin{equation*}
  G \dealcoloneq \lbrace v\in V=H^1_0(\Omega)\ :\ v(\mathbf x)\geq g(\mathbf x)\quad \text{a.e. in } \Omega\rbrace.
\end{equation*}

        This set takes care of the third and fifth conditions above (the boundary values and the complementarity condition).

Consider now the minimizer $u\in G$ of $E$ and any other function $v\in G$. Then the function

        \begin{equation*}
  F(\varepsilon) \dealcoloneq E(u+\varepsilon(v-u)),\quad\varepsilon\in\left[0,1\right],
 \end{equation*}


        takes its minimum at $\varepsilon = 0$ (because $u$ is a minimizer of the energy functional $E(\cdot)$), so that $F'(0)\geq 0$ for any choice of $v$. Note that $u+\varepsilon(v-u) = (1-\varepsilon)u+\varepsilon v\in G$ because of the convexity of $G$. If we compute $F'(\varepsilon)\vert_{\varepsilon=0}$ it yields the variational formulation we are searching for:


        Find a function $u\in G$ with

        \begin{equation*}
  \left(\nabla u, \nabla(v-u)\right) \geq \left(f,v-u\right) \quad \forall v\in G.
 \end{equation*}


        This is the typical form of variational inequalities, where not just $v$ appears in the bilinear form but in fact $v-u$. The reason is this: if $u$ is not constrained, then we can find test functions $v$ in $G$ so that $v-u$ can have any sign. By choosing test functions $v_1,v_2$ so that $v_1-u = -(v_2-u)$ it follows that the inequality can only hold for both $v_1$ and $v_2$ if the two sides are in fact equal, i.e., we obtain a variational equality.


        On the other hand, if $u=g$ then $G$ only allows test functions $v$ so that in fact $v-u\ge 0$. This means that we can't test the equation with both $v-u$ and $-(v-u)$ as above, and so we can no longer conclude that the two sides are in fact equal. Thus, this mimics the way we have discussed the complementarity condition above.


        Formulation as a saddle point problem

The variational inequality above is awkward to work with. We would therefore like to reformulate it as an equivalent saddle point problem. We introduce a Lagrange multiplier $\lambda$ and the convex cone $K\subset V'$, $V'$ dual space of $V$, $K \dealcoloneq \{\mu\in V': \langle\mu,v\rangle\geq 0,\quad \forall v\in V, v \le 0 \}$ of Lagrange multipliers, where $\langle\cdot,\cdot\rangle$ denotes the duality pairing between $V'$ and $V$. Intuitively, $K$ is the cone of all "non-positive functions", except that $K\subset V'$ and so contains objects other than regular functions as well.

        The existence and uniqueness of $(u,\lambda)\in V\times K$ of this saddle point problem has been stated in Glowinski, Lions and Trémolières: Numerical Analysis of Variational Inequalities, North-Holland, 1981.

        Active Set methods to solve the saddle point problem

        There are different methods to solve the variational inequality. As one possibility you can understand the saddle point problem as a convex quadratic program (QP) with inequality constraints.


        To get there, let us assume that we discretize both $u$ and $\lambda$ with the same finite element space, for example the usual $Q_k$ spaces. We would then get the equations


        \begin{eqnarray*}
  &A U + B\Lambda = F,&\\
  &[BU-G]_i \geq 0, \quad \Lambda_i \leq 0,\quad \Lambda_i[BU-G]_i = 0
 \qquad \forall i.&
 \end{eqnarray*}


        where $B$ is the mass matrix on the chosen finite element space and the indices $i$ above are for all degrees of freedom in the set $\cal S$ of degrees of freedom located in the interior of the domain (we have Dirichlet conditions on the perimeter). However, we can make our life simpler if we use a particular quadrature rule when assembling all terms that yield this mass matrix, namely a quadrature formula where quadrature points are only located at the interpolation points at which shape functions are defined; since all but one shape function are zero at these locations, we get a diagonal mass matrix with


        \begin{align*}
   B_{ii} = \int_\Omega \varphi_i(\mathbf x)^2\ \textrm{d}x,
   \qquad
   B_{ij}=0 \ \text{for } i\neq j.
 \end{align*}


        To define $G$ we use the same technique as for $B$. In other words, we define


        \begin{align*}
   G_{i} = \int_\Omega g_h(x) \varphi_i(\mathbf x)\ \textrm{d}x,
\end{align*}

so that the conditions stated above become

\begin{eqnarray*}
  &A U + B\Lambda = F,&\\
  &[BU-G]_i \geq 0, \quad \Lambda_i \leq 0,\quad \Lambda_i[BU-G]_i = 0
  \qquad \forall i\in{\cal S}.&
\end{eqnarray*}


        Now we define for each degree of freedom $i$ the function


        \begin{equation*}
  C([BU]_i,\Lambda_i) \dealcoloneq -\Lambda_i + \min\lbrace 0, \Lambda_i + c([BU]_i - G_i) \rbrace,
\end{equation*}

with some positive constant $c$. One can show that the complementarity conditions above are equivalent to the statement

\begin{equation*}
  C([BU]_i,\Lambda_i) = 0, \qquad \forall i\in{\cal S}.
\end{equation*}


The primal-dual active set strategy we will use here is an iterative scheme which is based on this condition to predict the next active and inactive sets $\mathcal{A}_k$ and $\mathcal{F}_k$ (that is, those complementary sets of indices $i$ for which $U_i$ is either equal to or not equal to the value of the obstacle $B^{-1}G$). For a more in-depth treatment of this approach, see Hintermueller, Ito, Kunisch: The primal-dual active set strategy as a semismooth Newton method, SIAM J. Optim., 2003, Vol. 13, No. 3, pp. 865-888.


        The primal-dual active set algorithm

        The algorithm for the primal-dual active set method works as follows (NOTE: $B = B^T$):

           

        The ObstacleProblem class template


This class supplies all functions and variables needed to describe the obstacle problem. It is close to what we had to do in step-4, and so relatively simple. The only real new components are the update_solution_and_constraints function that computes the active set and a number of variables that are necessary to describe the original (unconstrained) form of the linear system (complete_system_matrix and complete_system_rhs) as well as the active set itself and the diagonal of the mass matrix $B$ used in scaling Lagrange multipliers in the active set formulation. The rest is as in step-4:


          template <int dim>
          class ObstacleProblem
          {
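          // (The declaration is cut off here. Based on the members the
          //  paragraph above names, it plausibly continues along these
          //  lines; the exact types are assumptions:)
          public:
            ObstacleProblem();
            void run();

          private:
            void update_solution_and_constraints();

            TrilinosWrappers::SparseMatrix complete_system_matrix;
            TrilinosWrappers::MPI::Vector  complete_system_rhs;
            IndexSet                       active_set;
            TrilinosWrappers::MPI::Vector  diagonal_of_mass_matrix;
          };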

        Right hand side, boundary values, and the obstacle


        In the following, we define classes that describe the right hand side function, the Dirichlet boundary values, and the height of the obstacle as a function of $\mathbf x$. In all three cases, we derive these classes from Function<dim>, although in the case of RightHandSide and Obstacle this is more out of convention than necessity since we never pass such objects to the library. In any case, the definition of the right hand side and boundary values classes is obvious given our choice of $f=-10$, $u|_{\partial\Omega}=0$:


          template <int dim>
          class RightHandSide : public Function<dim>
          {
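          // (Cut off here; given f = -10 from the introduction, the class
          //  plausibly continues with the usual value() override:)
          public:
            virtual double value(const Point<dim> & /*p*/,
                                 const unsigned int component = 0) const override
            {
              (void)component;
              return -10;
            }
          };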

        The only other thing to do here is to compute the factors in the $B$ matrix which is used to scale the residual. As discussed in the introduction, we'll use a little trick to make this mass matrix diagonal, and in the following then first compute all of this as a matrix and then extract the diagonal elements for later use:


          mass_matrix.reinit(dsp);
          assemble_mass_matrix_diagonal(mass_matrix);

        ObstacleProblem::assemble_mass_matrix_diagonal


        The next function is used in the computation of the diagonal mass matrix $B$ used to scale variables in the active set method. As discussed in the introduction, we get the mass matrix to be diagonal by choosing the trapezoidal rule for quadrature. Doing so we don't really need the triple loop over quadrature points, indices $i$ and indices $j$ any more and can, instead, just use a double loop. The rest of the function is obvious given what we have discussed in many of the previous tutorial programs.


        Note that at the time this function is called, the constraints object only contains boundary value constraints; we therefore do not have to pay attention in the last copy-local-to-global step to preserve the values of matrix entries that may later on be constrained by the active set.

Note also that the trick with the trapezoidal rule only works if we have in fact $Q_1$ elements. For higher order elements, one would need to use a quadrature formula that has quadrature points at all the support points of the finite element. Constructing such a quadrature formula isn't really difficult, but it is not the point here, and so we simply assert at the top of the function that our implicit assumption about the finite element is in fact satisfied.

          template <int dim>
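          // (The function body is cut off in this comparison. A sketch of
          //  the double loop described above; member names such as
          //  dof_handler and constraints are assumptions:)
          void ObstacleProblem<dim>::assemble_mass_matrix_diagonal(
            TrilinosWrappers::SparseMatrix &mass_matrix)
          {
            Assert(fe.degree == 1, ExcNotImplemented());

            const QTrapezoid<dim> quadrature_formula;
            FEValues<dim>         fe_values(fe,
                                            quadrature_formula,
                                            update_values | update_JxW_values);

            FullMatrix<double> cell_matrix(fe.n_dofs_per_cell(),
                                           fe.n_dofs_per_cell());
            std::vector<types::global_dof_index> local_dof_indices(
              fe.n_dofs_per_cell());

            for (const auto &cell : dof_handler.active_cell_iterators())
              {
                fe_values.reinit(cell);
                cell_matrix = 0;

                // Quadrature points coincide with support points, so only
                // the diagonal survives and the j loop disappears:
                for (const unsigned int q : fe_values.quadrature_point_indices())
                  for (const unsigned int i : fe_values.dof_indices())
                    cell_matrix(i, i) += (fe_values.shape_value(i, q) *
                                          fe_values.shape_value(i, q) *
                                          fe_values.JxW(q));

                cell->get_dof_indices(local_dof_indices);
                constraints.distribute_local_to_global(cell_matrix,
                                                       local_dof_indices,
                                                       mass_matrix);
              }
          }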
         

        ObstacleProblem::solve


There is nothing to say, really, about the solve function. In the context of a Newton method, we are not typically interested in very high accuracy (why ask for a highly accurate solution of a linear problem that we know only gives us an approximation of the solution of the nonlinear problem?), and so we use the ReductionControl class that stops iterations when either an absolute tolerance is reached (for which we choose $10^{-12}$) or when the residual is reduced by a certain factor (here, $10^{-3}$).


          template <int dim>
          void ObstacleProblem<dim>::solve()
          {
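            // (Body cut off; a sketch matching the description above. The
            //  CG/AMG choice and the variable names are assumptions; only
            //  the ReductionControl tolerances are stated in the text:)
            ReductionControl reduction_control(100, 1e-12, 1e-3);

            SolverCG<TrilinosWrappers::MPI::Vector> solver(reduction_control);
            TrilinosWrappers::PreconditionAMG       precondition;
            precondition.initialize(system_matrix);

            solver.solve(system_matrix, solution, system_rhs, precondition);
            constraints.distribute(solution);
          }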
/usr/share/doc/packages/dealii/doxygen/deal.II/step_42.html differs (HTML document, UTF-8 Unicode text, with very long lines)

Here, the first of these equations defines the relationship between strain $\varepsilon(\mathbf u)=\frac{1}{2}\left(\nabla \mathbf u + \nabla \mathbf u^T\right)$ and stress $\sigma$ via the fourth-order compliance tensor $A$; $\varepsilon^p$ provides the plastic component of the strain to ensure that the stress does not exceed the yield stress. We will only consider isotropic materials for which $A$ can be expressed in terms of the Lamé moduli $\lambda$ and $\mu$ or alternatively in terms of the bulk modulus $\kappa$ and $\mu$. The second equation is the force balance; we will here not consider any body forces and henceforth assume that $\mathbf f=0$. The complementarity condition in the third line implies that $\varepsilon^p=0$ if $\mathcal{F}(\sigma)< 0$ but that $\varepsilon^p$ may be a nonzero tensor if and only if $\mathcal{F}(\sigma) = 0$, and in particular that in this case $\varepsilon^p$ must point in the direction $\partial\mathcal{F}(\sigma)/\partial \sigma$. The inequality $\mathcal{F}(\sigma)\le 0$ is a statement of the fact that plastic materials can only support a finite amount of stress; in other words, they react with plastic deformations $\varepsilon^p$ if external forces would otherwise result in a stress $\sigma$ for which $\mathcal{F}(\sigma)> 0$. A typical form for this yield function is $\mathcal{F}(\sigma)=|\sigma^D|-\sigma_{\text{yield}}$ where $\tau^D = \tau - \dfrac{1}{3}tr(\tau)I$ is the deviatoric part of a tensor and $|\cdot|$ denotes the Frobenius norm.

Further equations describe a fixed, zero displacement on $\Gamma_D$ and that on the surface $\Gamma_C=\partial\Omega\backslash\Gamma_D$ where contact may appear, the normal force $\sigma_n=\mathbf n \cdot (\sigma(\mathbf u)\ \mathbf n)$ exerted by the obstacle is inward (no "pull" by the obstacle on our body) and with zero tangential component $\mathbf \sigma_t= \sigma \mathbf n - \mathbf \sigma_n \mathbf n = \sigma \mathbf n - [\mathbf n \cdot(\sigma \mathbf n)]\mathbf n$. The last condition is again a complementarity condition that implies that on $\Gamma_C$, the normal force can only be nonzero if the body is in contact with the obstacle; the second part describes the impenetrability of the obstacle and the body. The last two equations are commonly referred to as the Signorini contact conditions.

Most materials - especially metals - have the property that they show some hardening as a result of deformation. In other words, $\sigma_{\text{yield}}$ increases with deformation. In practice, it is not the elastic deformation that results in hardening, but the plastic component. There are different constitutive laws to describe those material behaviors. The simplest one is called linear isotropic hardening, described by the flow function $\mathcal{F}(\sigma,\varepsilon^p) = \vert\sigma^D\vert - (\sigma_0 + \gamma^{\text{iso}}|\varepsilon^p|)$.
A strict approach would keep the active set fixed while we iterate the Newton method to convergence (or maybe the other way around: find the final active set before moving on to the next Newton iteration). In practice, it turns out that it is sufficient to do only a single Newton step per active set iteration, and so we will iterate over them concurrently. We will also, every once in a while, refine the mesh.

        A Newton method for the plastic nonlinearity

        As mentioned, we will treat the nonlinearity of the operator $P_\Pi$ by applying a Newton method, despite the fact that the operator is not differentiable in the strict sense. However, it satisfies the conditions of slant differentiability and this turns out to be enough for Newton's method to work. The resulting method then goes by the name semi-smooth Newton method, which sounds impressive but is, in reality, just a Newton method applied to a semi-smooth function with an appropriately chosen "derivative".


        In the current case, we will run our iteration by solving in each iteration $i$ the following equation (still an inequality, but linearized):


\begin{align*}
  \label{eq:linearization}
  \left(I_{\Pi}\varepsilon(\tilde {\mathbf u}^{i}), \ldots\right)
\end{align*}


        where $I$ and $\mathbb{I}$ are the identity tensors of rank 2 and 4, respectively.


        Note that this problem corresponds to a linear elastic contact problem where $I_\Pi$ plays the role of the elasticity tensor $C=A^{-1}$. Indeed, if the material is not plastic at a point, then $I_\Pi=C$. However, at places where the material is plastic, $I_\Pi$ is a spatially varying function. In any case, the system we have to solve for the Newton iterate $\tilde {\mathbf u}^{i}$ gets us closer to the goal of rewriting our problem in a way that allows us to use well-known solvers and preconditioners for elliptic systems.

        As a final note about the Newton method let us mention that as is common with Newton methods we need to globalize it by controlling the step length. In other words, while the system above solves for $\tilde {\mathbf u}^{i}$, the final iterate will rather be

\begin{align*}
  \hat {\mathbf u}^{i} = {\mathbf u}^{i-1} + \alpha^i \left(\tilde {\mathbf u}^{i}-{\mathbf u}^{i-1}\right)
\end{align*}

for some step length $0 < \alpha^i \le 1$.
If $\mathcal{A}_{i+1} = \mathcal{A}_k$ and $\left\| {\hat R}\left({\mathbf u}^{i}\right) \right\|_{\ell_2} < \delta$ then stop, else set $i=i+1$ and go to step (1). This step ensures that we only stop iterations if both the correct active set has been found and the plasticity has been iterated to sufficient accuracy.

In step 3 of this algorithm, the matrix $B\in\mathbb{R}^{n\times m}$, $n>m$, describes the coupling of the bases for the displacements and the Lagrange multiplier (contact forces), and it is not square in our situation since $\Lambda^k$ is only defined on $\Gamma_C$, i.e., the surface where contact may happen. As shown in the paper, we can choose $B$ to be a matrix that has only one entry per row (see also Hüeber, Wohlmuth: A primal-dual active set strategy for non-linear multibody contact problems, Comput. Methods Appl. Mech. Engrg. 194, 2005, pp. 3147-3166). The vector $G$ is defined by a suitable approximation $g_h$ of the gap $g$


        \begin{gather*}G_p = \begin{cases}
 g_{h,p}, & \text{if}\quad p\in\mathcal{S}\\
 0, & \text{if}\quad p\notin\mathcal{S}.
 \end{cases}\end{gather*}

        Adaptive mesh refinement


Since we run our program in 3d, the computations the program performs are expensive. Consequently, using adaptive mesh refinement is an important step towards staying within acceptable run-times. To make our lives easier we simply choose the KellyErrorEstimator that is already implemented in deal.II. We hand it the solution vector, which contains the displacement $u$. As we will see in the results, it yields a quite reasonable adaptive mesh for the contact zone as well as for plasticity.


        Implementation

This tutorial is essentially a mixture of step-40 and step-41, but instead of PETSc we let the Trilinos library deal with parallelizing the linear algebra (like in step-32). Since we are trying to solve a problem similar to the one in step-41, we will use the same methods, but now in parallel.

A difficulty is the handling of the constraints from the Dirichlet conditions, hanging nodes, and the inequality condition that arises from the contact. For this purpose we create three objects of type AffineConstraints that describe the various constraints and that we will combine as appropriate in each iteration.

         

        The ConstitutiveLaw class template

This class provides an interface for a constitutive law, i.e., for the relationship between strain $\varepsilon(\mathbf u)$ and stress $\sigma$. In this example we are using an elastoplastic material behavior with linear, isotropic hardening. Such materials are characterized by Young's modulus $E$, Poisson's ratio $\nu$, the initial yield stress $\sigma_0$ and the isotropic hardening parameter $\gamma$. For $\gamma = 0$ we obtain perfect elastoplastic behavior.

        As explained in the paper that describes this program, the first Newton steps are solved with a completely elastic material model to avoid having to deal with both nonlinearities (plasticity and contact) at once. To this end, this class has a function set_sigma_0() that we use later on to simply set $\sigma_0$ to a very large value – essentially guaranteeing that the actual stress will not exceed it, and thereby producing an elastic material. When we are ready to use a plastic model, we set $\sigma_0$ back to its proper value, using the same function. As a result of this approach, we need to leave sigma_0 as the only non-const member variable of this class.

          template <int dim>
         

The constructor of the ConstitutiveLaw class sets the required material parameters for our deformable body. Material parameters for elastic isotropic media can be defined in a variety of ways, such as the pair $E, \nu$ (elastic modulus and Poisson's number), using the Lamé parameters $\lambda,\mu$, or several other commonly used conventions. Here, the constructor takes a description of material parameters in the form of $E,\nu$, but since it turns out that these are not the coefficients that appear in the equations of the plastic projector, we immediately convert them into the more suitable set $\kappa,\mu$ of bulk and shear moduli. In addition, the constructor takes $\sigma_0$ (the yield stress absent any plastic strain) and $\gamma$ (the hardening parameter) as arguments. In this constructor, we also compute the two principal components of the stress-strain relation and its linearization.

          template <int dim>
          ConstitutiveLaw<dim>::ConstitutiveLaw(double E,
          double nu,
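          // (The parameter list is cut off here; following the constructor
          //  arguments named above and the standard E,nu -> kappa,mu
          //  conversion, it plausibly continues as follows. The member
          //  names are assumptions:)
          double sigma_0,
          double gamma)
          : kappa(E / (3 * (1 - 2 * nu)))  // bulk modulus
          , mu(E / (2 * (1 + nu)))         // shear modulus
          , sigma_0(sigma_0)
          , gamma(gamma)
        {}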

        The BitmapFile and ChineseObstacle classes

        The following two classes describe the obstacle outlined in the introduction, i.e., the Chinese character. The first of the two, BitmapFile is responsible for reading in data from a picture file stored in pbm ascii format. This data will be bilinearly interpolated and thereby provides a function that describes the obstacle. (The code below shows how one can construct a function by interpolating between given data points. One could use the Functions::InterpolatedUniformGridData, introduced after this tutorial program was written, which does exactly what we want here, but it is instructive to see how to do it by hand.)

        The data which we read from the file will be stored in a double std::vector named obstacle_data. This vector composes the base to calculate a piecewise bilinear function as a polynomial interpolation. The data we will read from a file consists of zeros (white) and ones (black).


        The hx,hy variables denote the spacing between pixels in $x$ and $y$ directions. nx,ny are the numbers of pixels in each of these directions. get_value() returns the value of the image at a given location, interpolated from the adjacent pixel values.


          template <int dim>
          class BitmapFile
          {

        The next block of variables corresponds to the solution and the linear systems we need to form. In particular, this includes the Newton matrix and right hand side; the vector that corresponds to the residual (i.e., the Newton right hand side) but from which we have not eliminated the various constraints and that is used to determine which degrees of freedom need to be constrained in the next iteration; and a vector that corresponds to the diagonal of the $B$ matrix briefly mentioned in the introduction and discussed in the accompanying paper.


        PlasticityContactProblem::assemble_mass_matrix_diagonal


The next helper function computes the (diagonal) mass matrix that is used to determine the active set of the active set method we use in the contact algorithm. This matrix is of mass matrix type, but unlike the standard mass matrix, we can make it diagonal (even in the case of higher order elements) by using a quadrature formula whose quadrature points are located at exactly the interpolation points of the finite element. We achieve this by using a QGaussLobatto quadrature formula here, along with initializing the finite element with a set of interpolation points derived from the same quadrature formula. The remainder of the function is relatively straightforward: we put the resulting matrix into the given argument; because we know the matrix is diagonal, it is sufficient to have a loop over only $i$ and not over $j$. Strictly speaking, we could even avoid multiplying the shape function's values at quadrature point q_point by itself because we know the shape value to be a vector with exactly one entry equal to one, which when dotted with itself yields one. Since this function is not time critical we add this term for clarity.
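
The underlying mechanism can be seen in isolation (a generic sketch, not the tutorial's code; fe_degree stands for the polynomial degree): if the element's support points and the quadrature points come from the same QGaussLobatto formula, each shape function is zero at every quadrature point but one, so all off-diagonal entries of the cell mass matrix vanish.

        const QGaussLobatto<1>   points_1d(fe_degree + 1);
        const FE_Q<dim>          fe(points_1d);  // support points at the GL points
        const QGaussLobatto<dim> quadrature(fe_degree + 1);
        // phi_i(x_q) = delta_{iq} here, so
        // M_{ij} = sum_q phi_i(x_q) phi_j(x_q) JxW(q) vanishes for i != j.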


          template <int dim>
          void PlasticityContactProblem<dim>::assemble_mass_matrix_diagonal(

Having computed the stress-strain tensor and its linearization, we can now put together the parts of the matrix and right hand side. In both, we need the linearized stress-strain tensor times the symmetric gradient of $\varphi_i$, i.e. the term $I_\Pi\varepsilon(\varphi_i)$, so we introduce an abbreviation of this term. Recall that the matrix corresponds to the bilinear form $A_{ij}=(I_\Pi\varepsilon(\varphi_i),\varepsilon(\varphi_j))$ in the notation of the accompanying publication, whereas the right hand side is $F_i=([I_\Pi-P_\Pi C]\varepsilon(\varphi_i),\varepsilon(\mathbf u))$ where $u$ is the current linearization point (typically the last solution). This might suggest that the right hand side will be zero if the material is completely elastic (where $I_\Pi=P_\Pi$), but this ignores the fact that the right hand side will also contain contributions from non-homogeneous constraints due to the contact.

        The code block that follows this adds contributions that are due to boundary forces, should there be any.

  const SymmetricTensor<2, dim> stress_phi_i =
    stress_strain_tensor_linearized *
    fe_values[displacement].symmetric_gradient(i, q_point);
        @@ -1672,11 +1672,11 @@

        PlasticityContactProblem::solve_newton_system

        The last piece before we can discuss the actual Newton iteration on a single mesh is the solver for the linear systems. There are a couple of complications that slightly obscure the code, but mostly it is just setup then solve. Among the complications are:

• For the hanging nodes we have to apply the AffineConstraints::set_zero function to newton_rhs. This is necessary if a hanging node with solution value $x_0$ has one neighbor with value $x_1$ which is in contact with the obstacle and one neighbor $x_2$ which is not in contact. Because the update for the former will be prescribed, the hanging node constraint will have an inhomogeneity and will look like $x_0 = x_1/2 + \text{gap}/2$. So the corresponding entries in the right-hand side are non-zero with a meaningless value. These values we have to set to zero.
        • Like in step-40, we need to shuffle between vectors that do and do not have ghost elements when solving or using the solution.
The rest of the function is similar to step-40 and step-41 except that we use a BiCGStab solver instead of CG. This is due to the fact that for very small hardening parameters $\gamma$, the linear system becomes almost semidefinite though still symmetric. BiCGStab appears to have an easier time with such linear systems.
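The two steps just described, condensed into a sketch (the names constraints, newton_matrix, newton_rhs, distributed_solution, and preconditioner are stand-ins for the program's actual members; the full function follows):

  // Zero out the meaningless right-hand-side entries at constrained
  // (hanging-node) rows, then solve with BiCGStab and re-apply the
  // constraints to the solution.
  constraints.set_zero(newton_rhs);

  SolverControl solver_control(newton_matrix.m(),
                               1e-12 * newton_rhs.l2_norm());
  SolverBicgstab<TrilinosWrappers::MPI::Vector> solver(solver_control);
  solver.solve(newton_matrix, distributed_solution, newton_rhs, preconditioner);

  constraints.distribute(distributed_solution);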

          template <int dim>
          void PlasticityContactProblem<dim>::solve_newton_system()
          {
        /usr/share/doc/packages/dealii/doxygen/deal.II/step_43.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_43.html 2024-12-27 18:25:20.832956329 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_43.html 2024-12-27 18:25:20.836956356 +0000 @@ -190,37 +190,37 @@

        Much inspiration for this program comes from step-31 but several of the techniques discussed here are original.

        Advection-dominated two-phase flow mathematical model

We consider the flow of a two-phase immiscible, incompressible fluid. Capillary and gravity effects are neglected, and viscous effects are assumed dominant. The governing equations for such a flow are identical to those used in step-21 and are

\begin{align*}
   \mathbf{u}_t &= - \mathbf{K} \lambda_t \left(S\right) \nabla p, \\
   \nabla \cdot \mathbf{u}_t &= q, \\
   \epsilon \frac{\partial S}{\partial t} + \nabla \cdot \left( \mathbf{u}_t  F\left( S \right) \right)&=0,
\end{align*}

where $S$ is the saturation (volume fraction between zero and one) of the second (wetting) phase, $p$ is the pressure, $\mathbf{K}$ is the permeability tensor, $\lambda_t$ is the total mobility, $\epsilon$ is the porosity, $F$ is the fractional flow of the wetting phase, $q$ is the source term and $\mathbf{u}_t$ is the total velocity. The total mobility, fractional flow of the wetting phase and total velocity are respectively given by

\begin{align*}
   \lambda_t(S)&= \lambda_w + \lambda_{nw} = \frac{k_{rw}(S)}{\mu_w} + \frac{k_{rnw}(S)}{\mu_{nw}}, \\
   F(S) &= \frac{\lambda_w}{\lambda_t} = \frac{\lambda_w}{\lambda_w + \lambda_{nw}} = \frac{k_{rw}(S)/\mu_w}{k_{rw}(S)/\mu_w + k_{rnw}(S)/\mu_{nw}}, \\
   \mathbf{u}_t &= \mathbf{u}_w + \mathbf{u}_{nw} = -\lambda_t(S)\mathbf{K} \cdot \nabla p,
\end{align*}

where subscripts $w, nw$ represent the wetting and non-wetting phases, respectively.

For convenience, the porosity $\epsilon$ in the saturation equation, which can be considered a scaling factor for the time variable, is set to one. Following a commonly used prescription for the dependence of the relative permeabilities $k_{rw}$ and $k_{rnw}$ on saturation, we use

\begin{align*}
   k_{rw}  &= S^2, \qquad&\qquad
   k_{rnw} &= \left( 1-S \right)^2.
\end{align*}

        The porous media equations above are augmented by initial conditions for the saturation and boundary conditions for the pressure. Since saturation and the gradient of the pressure uniquely determine the velocity, no boundary conditions are necessary for the velocity. Since the flow equations do not contain time derivatives, initial conditions for the velocity and pressure variables are not required. The flow field separates the boundary into inflow or outflow parts. Specifically,

\[
   \mathbf{\Gamma}_{in}(t) = \left\{\mathbf{x} \in \partial \Omega:\mathbf{n} \cdot \mathbf{u}_t<0\right\},
\]

and we arrive at a complete model by also imposing boundary values for the saturation variable on the inflow boundary $\mathbf{\Gamma}_{in}$.

        Adaptive operator splitting and time stepping

As seen in step-21, solving the flow equations for velocity and pressure is the part of the program that takes far longer than the (explicit) updating step for the saturation variable once we know the flow variables. On the other hand, the pressure and velocity depend only weakly on saturation, so one may think about only solving for pressure and velocity every few time steps while updating the saturation in every step. If we can find a criterion for when the flow variables need to be updated, we call this splitting an "adaptive operator splitting" scheme.

        Here, we use the following a posteriori criterion to decide when to re-compute pressure and velocity variables (detailed derivations and descriptions can be found in [Chueh2013]):

\begin{align*}
   \theta(n,n_p)
   =
     \max_{\kappa\in{\mathbb T}}
     \left(
     \left\| \frac 1{\lambda_t\left(S^{(n)}\right)}
       - \frac 1{\lambda_t\left(S^{(n_p)}\right)} \right\|_{L^\infty(\kappa)}
     \left\|\|\mathbf{K}^{-1}\|_1\right\|_{L^\infty(\kappa)}
     \right).
\end{align*}

where superscripts in parentheses denote the number of the saturation time step at which any quantity is defined and $n_p<n$ represents the last step where we actually computed the pressure and velocity. If $\theta(n,n_p)$ exceeds a certain threshold we re-compute the flow variables; otherwise, we skip this computation in time step $n$ and only move the saturation variable one time step forward.

In short, the algorithm allows us to perform a number of saturation time steps of length $\Delta t_c^{(n)}=t^{(n)}_c-t^{(n-1)}_c$ until the criterion above tells us to re-compute velocity and pressure variables, leading to a macro time step of length

\[
   \Delta t_p^{(n)} = \sum_{i=n_p+1}^{n} \Delta t_c^{(i)}.
\]

        We choose the length of (micro) steps subject to the Courant-Friedrichs-Lewy (CFL) restriction according to the criterion

\[
   \Delta t_c = \frac{\textrm{min}_{K}h_{K}}{7 \|\mathbf{u}_t\|_{L^{\infty}\left(\Omega\right)}},
\]

        which we have confirmed to be stable for the choice of finite element and time stepping scheme for the saturation equation discussed below ( $h_K$ denotes the diameter of cell $K$). The result is a scheme where neither micro nor macro time steps are of uniform length, and both are chosen adaptively.
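In code, the adaptive splitting and the CFL-limited micro step might be combined as in the following sketch (compute_theta(), get_maximal_velocity(), and the other names are illustrative placeholders, not the tutorial's exact interface):

  // Re-solve the expensive pressure-velocity system only when the total
  // mobility has drifted enough since the last flow solve; the saturation
  // update itself happens in every (micro) time step.
  const bool redo_flow_solve =
    (timestep_number <= 2) ||
    (compute_theta(saturation_solution, saturation_at_last_flow_solve) >
     theta_threshold);
  if (redo_flow_solve)
    solve_pressure_velocity_system();

  // CFL-restricted micro time step, as in the formula above.
  const double dt_c = GridTools::minimal_cell_diameter(triangulation) /
                      (7.0 * get_maximal_velocity());
  advance_saturation(dt_c);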

        Time discretization

        Using this time discretization, we obtain the following set of equations for each time step from the IMPES approach (see step-21):

\begin{align*}
   \mathbf{u}^{(n)}_t + \lambda_t\left(S^{(n-1)}\right) \mathbf{K} \nabla p^{(n)} =0, \\
   \nabla \cdot \mathbf{u}^{(n)}_t = q, \\
   \epsilon \left( \frac{S^{(n-1)}-S^{(n)}}{\Delta t^{(n)}_c} \right) + \mathbf{u}^{(n)}_t \cdot \nabla F\left(S^{(n-1)}\right) + F\left(S^{(n-1)}\right) \nabla \cdot \mathbf{u}^{(n)}_t =0.
\end{align*}

Using the fact that $\nabla \cdot \mathbf{u}_t = q$, the time discrete saturation equation becomes

\begin{align*}
   &\epsilon \left( \frac{S^{(n)}-S^{(n-1)}}{\Delta t^{(n)}_c} \right) + \mathbf{u}^{(n)}_t \cdot \nabla F\left(S^{(n-1)}\right) + F\left(S^{(n-1)}\right)q=0.
\end{align*}

        Weak form, space discretization for the pressure-velocity part

By multiplying the equations defining the total velocity $\mathbf u_t^{(n)}$ and the equation that expresses its divergence in terms of source terms, with test functions $\mathbf{v}$ and $w$ respectively and then integrating terms by parts as necessary, the weak form of the problem reads: Find $\mathbf u, p$ so that for all test functions $\mathbf{v}, w$ there holds

\begin{gather*}
   \left( \left( \mathbf{K} \lambda_t\left(S^{(n-1)}\right) \right)^{-1} \mathbf{u}^{(n)}_t, \mathbf{v}\right)_{\Omega} - \left(p^{(n)}, \nabla \cdot \mathbf{v}\right)_{\Omega} = -\left(p^{(n)}, \mathbf{n} \cdot \mathbf{v} \right)_{\partial \Omega}, \\
   - \left( \nabla \cdot \mathbf{u}^{(n)}_t,w\right)_{\Omega} = - \big(q,w\big)_{\Omega}.
\end{gather*}

Here, $\mathbf{n}$ represents the unit outward normal vector to $\partial\Omega$ and the pressure $p^{(n)}$ can be prescribed weakly on the open part of the boundary $\partial \Omega$ whereas on those parts where a velocity is prescribed (for example impermeable boundaries with $\mathbf n \cdot \mathbf u=0$) the term disappears altogether because $\mathbf n \cdot \mathbf v=0$.

We use continuous finite elements to discretize the velocity and pressure equations. Specifically, we use mixed finite elements to ensure high order approximation for both vector (e.g. a fluid velocity) and scalar variables (e.g. pressure) simultaneously. For saddle point problems, it is well established that the so-called Babuska-Brezzi or Ladyzhenskaya-Babuska-Brezzi (LBB) conditions [BrezziFortin], [Chen2005] need to be satisfied to ensure stability of the pressure-velocity system. These stability conditions are satisfied in the present work by using elements for velocity that are one order higher than for the pressure, i.e. $u_h \in Q^d_{p+1}$ and $p_h \in Q_p$, where $p=1$, $d$ is the space dimension, and $Q_s$ denotes the space of tensor product Lagrange polynomials of degree $s$ in each variable.
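In deal.II this element pairing is expressed with an FESystem; a minimal sketch for the stated choice $p=1$ (the variable names are illustrative):

  // Q_2^d velocities paired with Q_1 pressures satisfy the LBB condition.
  FESystem<dim> darcy_fe(FE_Q<dim>(2), dim,  // u_h in Q_{p+1}^d, p = 1
                         FE_Q<dim>(1), 1);   // p_h in Q_p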

        Stabilization, weak form and space discretization for the saturation transport equation

        The chosen $Q_1$ elements for the saturation equation do not lead to a stable discretization without upwinding or other kinds of stabilization, and spurious oscillations will appear in the numerical solution. Adding an artificial diffusion term is one approach to eliminating these oscillations [Chen2005]. On the other hand, adding too much diffusion smears sharp fronts in the solution and suffers from grid-orientation difficulties [Chen2005]. To avoid these effects, we use the artificial diffusion term proposed by [GuermondPasquetti2008] and validated in [Chueh2013] and [KHB12], as well as in step-31.

        This method modifies the (discrete) weak form of the saturation equation to read

\begin{align*}
   \left(\epsilon \frac{\partial S_h}{\partial t},\sigma_h\right)
   -
   \left(\mathbf{u}_t  F\left( S_h \right),
   \nabla \sigma_h\right)
   +
   \left(\mathbf n \cdot \mathbf{u}_t \hat F\left( S_h \right),
   \sigma_h\right)_{\partial\Omega}
   +
   \left(\nu\left(S_h\right) \nabla S_h,
   \nabla \sigma_h\right)
   &=0
   \qquad
   \forall \sigma_h,
\end{align*}

where $\nu$ is the artificial diffusion parameter and $\hat F$ is an appropriately chosen numerical flux on the boundary of the domain (we choose the obvious full upwind flux for this).

Following [GuermondPasquetti2008] (and as detailed in [Chueh2013]), we use the parameter as a piecewise constant function set on each cell $K$ with the diameter $h_{K}$ as

\[
   \nu(S_h)|_{K} = \beta \| \mathbf{u}_t \max\{F'(S_h),1\} \|_{L^{\infty}(K)} \textrm{min} \left\{ h_{K},h^{\alpha}_{K} \frac{\|\textrm{Res}(S_h)\|_{L^{\infty}(K)}}{c(\mathbf{u}_t,S)} \right\}
\]

where $\alpha$ is a stabilization exponent and $\beta$ is a dimensionless user-defined stabilization constant. Following [GuermondPasquetti2008] as well as the implementation in step-31, the velocity and saturation global normalization constant, $c(\mathbf{u}_t,S)$, and the residual $\textrm{Res}(S)$ are respectively given by

\[
   c(\mathbf{u}_t,S) = c_R \|\mathbf{u}_t \max\{F'(S),1\}\|_{L^{\infty}(\Omega)} \textrm{var}(S)^\alpha | \textrm{diam} (\Omega) |^{\alpha - 2}
\]

        and

\[
   \textrm{Res}(S) = \left( \epsilon \frac{\partial S}{\partial t} + \mathbf{u}_t \cdot \nabla F(S) + F(S)q \right) \cdot S^{\alpha - 1}
\]

where $c_R$ is a second dimensionless user-defined constant, $\textrm{diam}(\Omega)$ is the diameter of the domain and $\textrm{var}(S) = \textrm{max}_{\Omega} S - \textrm{min}_{\Omega} S$ is the range of the present saturation values in the entire computational domain $\Omega$.

This stabilization scheme has a number of advantages over simpler schemes such as finite volume (or discontinuous Galerkin) methods or streamline upwind Petrov Galerkin (SUPG) discretizations. In particular, the artificial diffusion term acts primarily in the vicinity of discontinuities since the residual is small in areas where the saturation is smooth. It therefore provides for a higher degree of accuracy. On the other hand, it is nonlinear since $\nu$ depends on the saturation $S$. We avoid this difficulty by treating all nonlinear terms explicitly, which leads to the following fully discrete problem at time step $n$:

\begin{align*}
   &\left( \epsilon S_h^{(n)},\sigma_h\right)_{\Omega} - \Delta t^{(n)}_c \Big(F\left(S_h^{(n-1)}\right)\mathbf{u}^{*}_t,\nabla\sigma_h\Big)_{\Omega} + \Delta t^{(n)}_c \Big(F\left(S_h^{(n-1)}\right)\left(\mathbf{n}\cdot\mathbf{u}^{*}_t\right),\sigma_h\Big)_{\partial\Omega} \nonumber \\
   & \quad = \left( \epsilon S_h^{(n-1)},\sigma_h\right)_{\Omega} - \Delta t^{(n)}_c \bigg(\nu\left(S_h^{(n-1)}\right)\nabla S_h^{(n-1)},\nabla\sigma_h\bigg)_{\Omega} \nonumber \\
   & \qquad + \Delta t^{(n)}_c \bigg(\mathbf{n}\cdot\nu\left(S_h^{(n-1)}\right)\nabla S^{(n-1)},\sigma_h\bigg)_{\partial\Omega}
\end{align*}

where $\mathbf{u}_t^{*}$ is the velocity linearly extrapolated from $\mathbf{u}^{(n_p)}_t$ and $\mathbf{u}^{(n_{pp})}_t$ to the current time $t^{(n)}$ if $\theta<\theta^*$ while $\mathbf{u}_t^{*}$ is $\mathbf{u}^{(n_p)}_t$ if $\theta>\theta^*$. Consequently, the equation is linear in $S_h^{(n)}$ and all that is required is to solve with a mass matrix on the saturation space.

Since the Dirichlet boundary conditions for saturation are only imposed on the inflow boundaries, the third term on the left hand side of the equation above needs to be split further into two parts:

\begin{align*}
   &\Delta t^{(n)}_c \Big(F\left(S_h^{(n-1)}\right)\left(\mathbf{n}\cdot\mathbf{u}^{(n)}_t\right),\sigma_h\Big)_{\partial\Omega} \nonumber \\
   &\qquad= \Delta t^{(n)}_c \Big(F\left(S^{(n-1)}_{(+)}\right)\left(\mathbf{n}\cdot\mathbf{u}^{(n)}_{t(+)}\right),\sigma_h\Big)_{\partial\Omega_{(+)}} + \Delta t^{(n)}_c \Big(F\left(S^{(n-1)}_{(-)}\right)\left(\mathbf{n}\cdot\mathbf{u}^{(n)}_{t(-)}\right),\sigma_h\Big)_{\partial\Omega_{(-)}}
\end{align*}

where $\partial\Omega_{(-)} = \left\{\mathbf{x} \in \partial\Omega : \mathbf{n} \cdot \mathbf{u}_t<0\right\}$ and $\partial\Omega_{(+)} = \left\{\mathbf{x} \in \partial\Omega : \mathbf{n} \cdot \mathbf{u}_t>0\right\}$ represent inflow and outflow boundaries, respectively. We choose values using an upwind formulation, i.e. $S^{(n-1)}_{(+)}$ and $\mathbf{u}^{(n)}_{t(+)}$ correspond to the values taken from the present cell, while the values of $S^{(n-1)}_{(-)}$ and $\mathbf{u}^{(n)}_{t(-)}$ are those taken from the neighboring boundary $\partial\Omega_{(-)}$.
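A cell-wise computation of the artificial viscosity defined above might look like the following sketch (a free function under assumed inputs; the argument names stand in for the user-defined constants and the per-cell norms):

  #include <algorithm>
  #include <cmath>

  // nu(S_h)|_K per the formula above: beta * ||u max{F',1}||_Linf(K)
  //   * min( h_K, h_K^alpha * ||Res(S)||_Linf(K) / c(u,S) )
  double compute_nu(const double max_velocity_times_dF, // ||u_t max{F'(S),1}||_Linf(K)
                    const double max_residual,          // ||Res(S)||_Linf(K)
                    const double cell_diameter,         // h_K
                    const double global_scaling,        // c(u_t, S)
                    const double beta,                  // user constant
                    const double alpha)                 // stabilization exponent
  {
    return beta * max_velocity_times_dF *
           std::min(cell_diameter,
                    std::pow(cell_diameter, alpha) * max_residual /
                      global_scaling);
  }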

        Adaptive mesh refinement

        Choosing meshes adaptively to resolve sharp saturation fronts is an essential ingredient to achieve efficiency in our algorithm. Here, we use the same shock-type refinement approach used in [Chueh2013] to select those cells that should be refined or coarsened. The refinement indicator for each cell $K$ of the triangulation is computed by

\[
   \eta_{K} = |\nabla S_h(\mathbf x_K)|
\]

where $\nabla S_h(\mathbf x_K)$ is the gradient of the discrete saturation variable evaluated at the center $\mathbf x_K$ of cell $K$. This approach is analogous to ones frequently used in compressible flow problems, where density gradients are used to indicate refinement. That said, as we will discuss at the end of the results section, this turns out to not be a very useful criterion since it leads to refinement basically everywhere. We only show it here for illustrative purposes.
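Evaluating this indicator in deal.II can be done with a single midpoint quadrature point per cell; a sketch (the vector and handler names are assumptions):

  QMidpoint<dim>              midpoint_rule;
  FEValues<dim>               fe_values(saturation_fe, midpoint_rule,
                                        update_gradients);
  std::vector<Tensor<1, dim>> grad_S(1);
  for (const auto &cell : saturation_dof_handler.active_cell_iterators())
    {
      fe_values.reinit(cell);
      // Gradient of the discrete saturation at the cell center x_K:
      fe_values.get_function_gradients(saturation_solution, grad_S);
      refinement_indicators(cell->active_cell_index()) = grad_S[0].norm();
    }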

        The linear system and its preconditioning

Following the discretization of the governing equations discussed above, we obtain a linear system of equations in time step $(n)$ of the following form:
        /usr/share/doc/packages/dealii/doxygen/deal.II/step_44.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_44.html 2024-12-27 18:25:20.992957427 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_44.html 2024-12-27 18:25:20.996957455 +0000 @@ -205,143 +205,143 @@

        Notation

        One can think of fourth-order tensors as linear operators mapping second-order tensors (matrices) onto themselves in much the same way as matrices map vectors onto vectors. There are various fourth-order unit tensors that will be required in the forthcoming presentation. The fourth-order unit tensors $\mathcal{I}$ and $\overline{\mathcal{I}}$ are defined by

\[
 \mathbf{A} = \mathcal{I}:\mathbf{A}
         \qquad \text{and} \qquad
 \mathbf{A}^T = \overline{\mathcal{I}}:\mathbf{A} \, .
\]

        Note $\mathcal{I} \neq \overline{\mathcal{I}}^T$. Furthermore, we define the symmetric and skew-symmetric fourth-order unit tensors by

\[
 \mathcal{S} \dealcoloneq \dfrac{1}{2}[\mathcal{I} + \overline{\mathcal{I}}]
         \qquad \text{and} \qquad
 \mathcal{W} \dealcoloneq \dfrac{1}{2}[\mathcal{I} - \overline{\mathcal{I}}] \, ,
\]

        such that

\[
 \dfrac{1}{2}[\mathbf{A} + \mathbf{A}^T] = \mathcal{S}:\mathbf{A}
         \qquad \text{and} \qquad
 \dfrac{1}{2}[\mathbf{A} - \mathbf{A}^T] = \mathcal{W}:\mathbf{A} \, .
\]

        The fourth-order SymmetricTensor returned by identity_tensor() is $\mathcal{S}$.
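A quick check of these identities using deal.II's tensor classes (a sketch for dim = 3):

  #include <deal.II/base/symmetric_tensor.h>

  // S : A returns the symmetric part of A; for a SymmetricTensor argument
  // the double contraction therefore reproduces A itself.
  const SymmetricTensor<4, 3> S = identity_tensor<3>();
  SymmetricTensor<2, 3>       A;
  A[0][0] = 1.;
  A[0][1] = 2.;
  A[1][2] = 3.;
  const SymmetricTensor<2, 3> B = S * A; // B == A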

        Kinematics

Let the time domain be denoted $\mathbb{T} = [0,T_{\textrm{end}}]$, where $t \in \mathbb{T}$ and $T_{\textrm{end}}$ is the total problem duration. Consider a continuum body that occupies the reference configuration $\Omega_0$ at time $t=0$. Particles in the reference configuration are identified by the position vector $\mathbf{X}$. The configuration of the body at a later time $t>0$ is termed the current configuration, denoted $\Omega$, with particles identified by the vector $\mathbf{x}$. The nonlinear map between the reference and current configurations, denoted $\boldsymbol{\varphi}$, acts as follows:

\[
 \mathbf{x} = \boldsymbol{\varphi}(\mathbf{X},t) \, .
\]

        The material description of the displacement of a particle is defined by

\[
 \mathbf{U}(\mathbf{X},t) = \mathbf{x}(\mathbf{X},t) - \mathbf{X} \, .
\]

        The deformation gradient $\mathbf{F}$ is defined as the material gradient of the motion:

\[
 \mathbf{F}(\mathbf{X},t)
         \dealcoloneq \dfrac{\partial \boldsymbol{\varphi}(\mathbf{X},t)}{\partial \mathbf{X}}
         = \textrm{Grad}\ \mathbf{x}(\mathbf{X},t)
         = \mathbf{I} + \textrm{Grad}\ \mathbf{U} \, .
\]
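In code, this is a one-liner given the displacement gradient at a quadrature point; deal.II also ships it as Physics::Elasticity::Kinematics::F (a sketch):

  #include <deal.II/base/tensor.h>
  #include <deal.II/physics/elasticity/kinematics.h>

  template <int dim>
  Tensor<2, dim> deformation_gradient(const Tensor<2, dim> &grad_U)
  {
    // F = I + Grad U
    return Physics::Elasticity::Kinematics::F(grad_U);
  }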

The determinant of the deformation gradient $J(\mathbf{X},t) \dealcoloneq \textrm{det}\ \mathbf{F}(\mathbf{X},t) > 0$ maps corresponding volume elements in the reference and current configurations, denoted $\textrm{d}V$ and $\textrm{d}v$, respectively, as

\[
 \textrm{d}v = J(\mathbf{X},t)\; \textrm{d}V \, .
\]

Two important measures of the deformation in terms of the spatial and material coordinates are the left and right Cauchy-Green tensors, respectively, and denoted $\mathbf{b} \dealcoloneq \mathbf{F}\mathbf{F}^T$ and $\mathbf{C} \dealcoloneq \mathbf{F}^T\mathbf{F}$. They are both symmetric and positive definite.

        The Green-Lagrange strain tensor is defined by

\[
 \mathbf{E} \dealcoloneq \frac{1}{2}[\mathbf{C} - \mathbf{I} ]
         = \underbrace{\frac{1}{2}[\textrm{Grad}^T \mathbf{U} +  \textrm{Grad}\mathbf{U}]}_{\boldsymbol{\varepsilon}}
                 + \frac{1}{2}[\textrm{Grad}^T\ \mathbf{U}][\textrm{Grad}\ \mathbf{U}] \, .
\]

        If the assumption of infinitesimal deformations is made, then the second term on the right can be neglected, and $\boldsymbol{\varepsilon}$ (the linearised strain tensor) is the only component of the strain tensor. This assumption is, looking at the setup of the problem, not valid in step-18, making the use of the linearized $\boldsymbol{\varepsilon}$ as the strain measure in that tutorial program questionable.

In order to handle the different response that materials exhibit when subjected to bulk and shear type deformations we consider the following decomposition of the deformation gradient $\mathbf{F}$ and the left Cauchy-Green tensor $\mathbf{b}$ into volume-changing (volumetric) and volume-preserving (isochoric) parts:

\[
 \mathbf{F}
         = (J^{1/3}\mathbf{I})\overline{\mathbf{F}}
 \qquad \text{and} \qquad
 \mathbf{b}
 = (J^{2/3}\mathbf{I})\overline{\mathbf{F}}\,\overline{\mathbf{F}}^T
         =  (J^{2/3}\mathbf{I})\overline{\mathbf{b}} \, .
\]
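A sketch of extracting the isochoric part in code (determinant() is part of deal.II's tensor interface):

  #include <cmath>
  #include <deal.II/base/tensor.h>

  template <int dim>
  Tensor<2, dim> isochoric_part(const Tensor<2, dim> &F)
  {
    const double J = determinant(F);
    // F_bar = J^{-1/3} F has unit determinant (for dim == 3).
    return std::pow(J, -1.0 / dim) * F;
  }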

Clearly, $\textrm{det}\ \mathbf{F} = \textrm{det}\ (J^{1/3}\mathbf{I}) = J$.

The spatial velocity field is denoted $\mathbf{v}(\mathbf{x},t)$. The derivative of the spatial velocity field with respect to the spatial coordinates gives the spatial velocity gradient $\mathbf{l}(\mathbf{x},t)$, that is

\[
 \mathbf{l}(\mathbf{x},t)
         \dealcoloneq \dfrac{\partial \mathbf{v}(\mathbf{x},t)}{\partial \mathbf{x}}
         = \textrm{grad}\ \mathbf{v}(\mathbf{x},t) \, ,
\]

where $\textrm{grad} \{\bullet \} = \frac{\partial \{ \bullet \} }{ \partial \mathbf{x}} = \frac{\partial \{ \bullet \} }{ \partial \mathbf{X}}\frac{\partial \mathbf{X} }{ \partial \mathbf{x}} = \textrm{Grad} \{ \bullet \} \mathbf{F}^{-1}$.

        Kinetics

Cauchy's stress theorem equates the Cauchy traction $\mathbf{t}$ acting on an infinitesimal surface element in the current configuration $\mathrm{d}a$ to the product of the Cauchy stress tensor $\boldsymbol{\sigma}$ (a spatial quantity) and the outward unit normal to the surface $\mathbf{n}$ as

\[
 \mathbf{t}(\mathbf{x},t, \mathbf{n}) = \boldsymbol{\sigma}\mathbf{n} \, .
\]

The Cauchy stress is symmetric. Similarly, the first Piola-Kirchhoff traction $\mathbf{T}$ which acts on an infinitesimal surface element in the reference configuration $\mathrm{d}A$ is the product of the first Piola-Kirchhoff stress tensor $\mathbf{P}$ (a two-point tensor) and the outward unit normal to the surface $\mathbf{N}$ as

\[
 \mathbf{T}(\mathbf{X},t, \mathbf{N}) = \mathbf{P}\mathbf{N} \, .
\]

        The Cauchy traction $\mathbf{t}$ and the first Piola-Kirchhoff traction $\mathbf{T}$ are related as

\[
 \mathbf{t}\mathrm{d}a = \mathbf{T}\mathrm{d}A \, .
\]

        This can be demonstrated using Nanson's formula.

        The first Piola-Kirchhoff stress tensor is related to the Cauchy stress as

\[
 \mathbf{P} = J \boldsymbol{\sigma}\mathbf{F}^{-T} \, .
\]

Further important stress measures are the (spatial) Kirchhoff stress $\boldsymbol{\tau} = J \boldsymbol{\sigma}$ and the (referential) second Piola-Kirchhoff stress $\mathbf{S} = {\mathbf{F}}^{-1} \boldsymbol{\tau} {\mathbf{F}}^{-T}$.
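The relations between the stress measures translate directly into tensor arithmetic; a sketch (assuming sigma and F are given at a quadrature point):

  #include <deal.II/base/tensor.h>

  template <int dim>
  void stress_measures(const Tensor<2, dim> &sigma, const Tensor<2, dim> &F)
  {
    const double         J     = determinant(F);
    const Tensor<2, dim> F_inv = invert(F);
    const Tensor<2, dim> P     = J * sigma * transpose(F_inv);   // 1st Piola-Kirchhoff
    const Tensor<2, dim> tau   = J * sigma;                      // Kirchhoff
    const Tensor<2, dim> S     = F_inv * tau * transpose(F_inv); // 2nd Piola-Kirchhoff
  }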

        Push-forward and pull-back operators

        Push-forward and pull-back operators allow one to transform various measures between the material and spatial settings. The stress measures used here are contravariant, while the strain measures are covariant.

The push-forward and pull-back operations for second-order covariant tensors $(\bullet)^{\text{cov}}$ are respectively given by:

\[
 \chi_{*}(\bullet)^{\text{cov}} \dealcoloneq \mathbf{F}^{-T} (\bullet)^{\text{cov}} \mathbf{F}^{-1}
 \qquad \text{and} \qquad
 \chi^{-1}_{*}(\bullet)^{\text{cov}} \dealcoloneq \mathbf{F}^{T} (\bullet)^{\text{cov}} \mathbf{F} \, .
\]

The push-forward and pull-back operations for second-order contravariant tensors $(\bullet)^{\text{con}}$ are respectively given by:

\[
 \chi_{*}(\bullet)^{\text{con}} \dealcoloneq \mathbf{F} (\bullet)^{\text{con}} \mathbf{F}^T
 \qquad \text{and} \qquad
 \chi^{-1}_{*}(\bullet)^{\text{con}} \dealcoloneq \mathbf{F}^{-1} (\bullet)^{\text{con}} \mathbf{F}^{-T} \, .
\]

For example $\boldsymbol{\tau} = \chi_{*}(\mathbf{S})$.
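A sketch of the contravariant push-forward as plain tensor arithmetic (deal.II also offers these operations in the Physics::Transformations namespace):

  #include <deal.II/base/tensor.h>

  // chi_*(X) = F X F^T, e.g. tau = push_forward_contravariant(S, F).
  template <int dim>
  Tensor<2, dim> push_forward_contravariant(const Tensor<2, dim> &X,
                                            const Tensor<2, dim> &F)
  {
    return F * X * transpose(F);
  }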

        Hyperelastic materials

A hyperelastic material response is governed by a Helmholtz free energy function $\Psi = \Psi(\mathbf{F}) = \Psi(\mathbf{C}) = \Psi(\mathbf{b})$ which serves as a potential for the stress. For example, if the Helmholtz free energy depends on the right Cauchy-Green tensor $\mathbf{C}$ then the isotropic hyperelastic response is

\[
 \mathbf{S}
         = 2 \dfrac{\partial \Psi(\mathbf{C})}{\partial \mathbf{C}} \, .
\]

If the Helmholtz free energy depends on the left Cauchy-Green tensor $\mathbf{b}$ then the isotropic hyperelastic response is

\[
 \boldsymbol{\tau}
         = 2 \dfrac{\partial \Psi(\mathbf{b})}{\partial \mathbf{b}} \mathbf{b}
         =  2 \mathbf{b} \dfrac{\partial \Psi(\mathbf{b})}{\partial \mathbf{b}} \, .
\]

        Following the multiplicative decomposition of the deformation gradient, the Helmholtz free energy can be decomposed as

\[
 \Psi(\mathbf{b}) = \Psi_{\text{vol}}(J) + \Psi_{\text{iso}}(\overline{\mathbf{b}}) \, .
\]

Similarly, the Kirchhoff stress can be decomposed into volumetric and isochoric parts as $\boldsymbol{\tau} = \boldsymbol{\tau}_{\text{vol}} + \boldsymbol{\tau}_{\text{iso}}$ where:

\begin{align*}
 \boldsymbol{\tau}_{\text{vol}} &=
/usr/share/doc/packages/dealii/doxygen/deal.II/step_45.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_45.html	2024-12-27 18:25:21.052957839 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_45.html	2024-12-27 18:25:21.056957867 +0000
@@ -145,14 +145,14 @@

        void collect_periodic_faces(const MeshType &mesh, const types::boundary_id b_id1, const types::boundary_id b_id2, const unsigned int direction, std::vector< PeriodicFacePair< typename MeshType::cell_iterator > > &matched_pairs, const Tensor< 1, MeshType::space_dimension > &offset=::Tensor< 1, MeshType::space_dimension >(), const FullMatrix< double > &matrix=FullMatrix< double >())

This call loops over all faces of the container dof_handler on the periodic boundaries with boundary indicator b_id1 and b_id2, respectively. (You can assign these boundary indicators by hand after creating the coarse mesh, see Boundary indicator. Alternatively, you can also let many of the functions in namespace GridGenerator do this for you if you specify the "colorize" flag; in that case, these functions will assign different boundary indicators to different parts of the boundary, with the details typically spelled out in the documentation of these functions.)

Concretely, if $\text{vertices}_{1/2}$ are the vertices of two faces $\text{face}_{1/2}$, then the function call above will match pairs of faces (and dofs) such that the difference between $\text{vertices}_2$ and $matrix\cdot \text{vertices}_1+\text{offset}$ vanishes in every component apart from direction and stores the resulting pairs with associated data in matched_pairs. (See GridTools::orthogonal_equality() for detailed information about the matching process.)

        Consider, for example, the colored unit square $\Omega=[0,1]^2$ with boundary indicator 0 on the left, 1 on the right, 2 on the bottom and 3 on the top faces. (See the documentation of GridGenerator::hyper_cube() for this convention on how boundary indicators are assigned.) Then,

GridTools::collect_periodic_faces(dof_handler,
                                  /*b_id1*/ 0,
                                  /*b_id2*/ 1,
                                  /*direction*/ 0,
                                  matched_pairs);
would yield periodicity constraints such that $u(0,y)=u(1,y)$ for all $y\in[0,1]$.

If we instead consider the parallelogram given by the convex hull of $(0,0)$, $(1,1)$, $(1,2)$, $(0,1)$ we can achieve the constraints $u(0,y)=u(1,y+1)$ by specifying an offset:

GridTools::collect_periodic_faces(dof_handler,
                                  /*b_id1*/ 0,
                                  /*b_id2*/ 1,
                                  /*direction*/ 0,
                                  matched_pairs,
                                  offset);
        @@ -182,18 +182,18 @@

Here, we need to specify the orientation of the two faces using face_orientation, face_flip and face_rotation. For a closer description have a look at the documentation of DoFTools::make_periodicity_constraints. The remaining parameters are the same as for the high level interface apart from the self-explaining component_mask and affine_constraints.

        A practical example

        In the following, we show how to use the above functions in a more involved example. The task is to enforce rotated periodicity constraints for the velocity component of a Stokes flow.

On a quarter-circle defined by $\Omega=\{{\bf x}\in(0,1)^2:\|{\bf x}\|\in (0.5,1)\}$ we are going to solve the Stokes problem

\begin{eqnarray*}
   -\Delta \; \textbf{u} + \nabla p &=& (\exp(-100\|{\bf x}-(.75,0.1)^T\|^2),0)^T, \\
   -\textrm{div}\;  \textbf{u}&=&0,\\
   \textbf{u}|_{\Gamma_1}&=&{\bf 0},
\end{eqnarray*}

where the boundary $\Gamma_1$ is defined as $\Gamma_1 \dealcoloneq \{x\in \partial\Omega: \|x\|\in\{0.5,1\}\}$. For the remaining parts of the boundary we are going to use periodic boundary conditions, i.e.

\begin{align*}
   u_x(0,\nu)&=-u_y(\nu,0)&\nu&\in[0,1]\\
   u_y(0,\nu)&=u_x(\nu,0)&\nu&\in[0,1].
\end{align*}

        The mesh will be generated by GridGenerator::quarter_hyper_shell(), which also documents how it assigns boundary indicators to its various boundaries if its colorize argument is set to true.
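Using the signature quoted further below, the mesh setup might look like this sketch (radii chosen to match the domain definition above):

  GridGenerator::quarter_hyper_shell(triangulation,
                                     Point<dim>(), // center at the origin
                                     0.5,          // inner radius
                                     1.0,          // outer radius
                                     0,            // default number of cells
                                     true);        // colorize boundary indicators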

        The commented program

        @@ -216,15 +216,15 @@
         
        void quarter_hyper_shell(Triangulation< dim > &tria, const Point< dim > &center, const double inner_radius, const double outer_radius, const unsigned int n_cells=0, const bool colorize=false)
Before we can prescribe periodicity constraints, we need to ensure that cells on opposite sides of the domain but connected by periodic faces are part of the ghost layer if one of them is stored on the local processor. At this point we need to think about how we want to prescribe periodicity. The vertices $\text{vertices}_2$ of a face on the left boundary should be matched to the vertices $\text{vertices}_1$ of a face on the lower boundary given by $\text{vertices}_2=R\cdot \text{vertices}_1+b$ where the rotation matrix $R$ and the offset $b$ are given by

\begin{align*}
   R=\begin{pmatrix}
   0&1\\-1&0
   \end{pmatrix},
   \quad
   b=\begin{pmatrix}0&0\end{pmatrix}.
\end{align*}

The data structure into which we will save the resulting information is here based on the Triangulation.

        @@ -306,23 +306,23 @@
        std::vector< types::global_dof_index > count_dofs_per_fe_block(const DoFHandler< dim, spacedim > &dof, const std::vector< unsigned int > &target_block=std::vector< unsigned int >())
        void interpolate_boundary_values(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const std::map< types::boundary_id, const Function< spacedim, number > * > &function_map, std::map< types::global_dof_index, number > &boundary_values, const ComponentMask &component_mask={})
After we have provided the mesh with the necessary information for the periodicity constraints, we are now able to actually create them. For describing the matching we are using the same approach as before, i.e., the $\text{vertices}_2$ of a face on the left boundary should be matched to the vertices $\text{vertices}_1$ of a face on the lower boundary given by $\text{vertices}_2=R\cdot \text{vertices}_1+b$ where the rotation matrix $R$ and the offset $b$ are given by

\begin{align*}
   R=\begin{pmatrix}
   0&1\\-1&0
   \end{pmatrix},
   \quad
   b=\begin{pmatrix}0&0\end{pmatrix}.
\end{align*}

These two objects not only describe how faces should be matched but also in which sense the solution should be transformed from $\text{face}_2$ to $\text{face}_1$.

          FullMatrix<double> rotation_matrix(dim);
          rotation_matrix[0][1] = 1.;
          rotation_matrix[1][0] = -1.;
         
          Tensor<1, dim> offset;
         
For setting up the constraints, we first store the periodicity information in an auxiliary object of type std::vector<GridTools::PeriodicFacePair<typename DoFHandler<dim>::cell_iterator>>. The periodic boundaries have the boundary indicators 2 (x=0) and 3 (y=0). All the other parameters we have set up before. In this case the direction does not matter. Due to $\text{vertices}_2=R\cdot \text{vertices}_1+b$ this is exactly what we want.

  std::vector<
    GridTools::PeriodicFacePair<typename DoFHandler<dim>::cell_iterator>>
    periodicity_vector;
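A hedged sketch of how these pieces are then combined (the precise arguments are in the tutorial source; boundary ids 2 and 3 follow from the text above, while velocities, constraints, and first_vector_components are assumed members):

  GridTools::collect_periodic_faces(dof_handler,
                                    /*b_id1*/ 2,
                                    /*b_id2*/ 3,
                                    /*direction*/ 1,
                                    periodicity_vector,
                                    offset,
                                    rotation_matrix);
  DoFTools::make_periodicity_constraints<dim, dim>(periodicity_vector,
                                                   constraints,
                                                   fe.component_mask(velocities),
                                                   first_vector_components);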
        /usr/share/doc/packages/dealii/doxygen/deal.II/step_46.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_46.html 2024-12-27 18:25:21.136958416 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_46.html 2024-12-27 18:25:21.140958443 +0000 @@ -165,7 +165,7 @@

        Introduction

        This program deals with the problem of coupling different physics in different parts of the domain. Specifically, let us consider the following situation that couples a Stokes fluid with an elastic solid (these two problems were previously discussed separately in step-22 and step-8, where you may want to read up on the individual equations):

• In a part $\Omega_f$ of $\Omega$, we have a fluid flowing that satisfies the time independent Stokes equations (in the form that involves the strain tensor):

          \begin{align*}
     -2\eta\nabla \cdot \varepsilon(\mathbf v) + \nabla p &= 0,
           \qquad \qquad && \text{in}\ \Omega_f\\
@@ -231,12 +231,12 @@
         0,
 \end{align*}

for all test functions $\mathbf a, q, \mathbf b$; the first, second, and third lines correspond to the fluid, solid, and interface contributions, respectively. Note that $Y$ is only a subspace of the spaces listed above to accommodate for the various Dirichlet boundary conditions.

This sort of coupling is of course possible by simply having two Triangulation and two DoFHandler objects, one for each of the two subdomains. On the other hand, deal.II is much simpler to use if there is a single DoFHandler object that knows about the discretization of the entire problem.

          This program is about how this can be achieved. Note that the goal is not to present a particularly useful physical model (a realistic fluid-structure interaction model would have to take into account the finite deformation of the solid and the effect this has on the fluid): this is, after all, just a tutorial program intended to demonstrate techniques, not to solve actual problems. Furthermore, we will make the assumption that the interface between the subdomains is aligned with coarse mesh cell faces.

          The general idea

          Before going into more details let us state the obvious: this is a problem with multiple solution variables; for this, you will probably want to read the Handling vector valued problems documentation topic first, which presents the basic philosophical framework in which we address problems with more than one solution variable. But back to the problem at hand:

The fundamental idea to implement this sort of problem in deal.II goes as follows: in the problem formulation, the velocity and pressure variables $\mathbf v, p$ only live in the fluid subdomain $\Omega_f$. But let's assume that we extend them by zero to the entire domain $\Omega$ (in the general case this means that they will be discontinuous along $\Gamma_i$). So what is the appropriate function space for these variables? We know that on $\Omega_f$ we should require $\mathbf v \in H^1(\Omega_f)^d, p \in L_2(\Omega_f)$, so for the extensions $\tilde{\mathbf v}, \tilde p$ to the whole domain the following appears a useful set of function spaces:

\begin{align*}
   \tilde {\mathbf v} &\in V
    = \{\tilde {\mathbf v}|_{\Omega_f} \in H^1(\Omega_f)^d, \quad
        \tilde {\mathbf v}|_{\Omega_s} = 0 \}
   \\
   \tilde p &\in P
    = \{ \tilde p|_{\Omega_f} \in L_2(\Omega_f), \quad
         \tilde p|_{\Omega_s} = 0 \}.
\end{align*}

          (Since this is not important for the current discussion, we have omitted the question of boundary values from the choice of function spaces; this question also affects whether we can choose $L_2$ for the pressure or whether we have to choose the space $L_{2,0}(\Omega_f)=\{q\in L_2(\Omega_f): \int_{\Omega_f} q = 0\}$ for the pressure. None of these questions are relevant to the following discussion, however.)

          Note that these are indeed linear function spaces with obvious norms. Since no confusion is possible in practice, we will henceforth omit the tilde again to denote the extension of a function to the whole domain and simply refer by $\mathbf v, p$ to both the original and the extended function.

          For discretization, we need finite dimensional subspaces $V_h,P_h$ of $V, P$. For Stokes, we know from step-22 that an appropriate choice is $Q_{p+1}^d\times Q_p$, but this only holds for that part of the domain occupied by the fluid. For the extended field, let's use the following subspaces defined on the triangulation $\mathbb T$:

          \[
    Z = \{ \varphi: \varphi(x)=0 \}.
 \]

          The symbol $Z$ is a reminder of the fact that functions in this space are zero. Obviously, we choose $Z_h=Z$.

          This entire discussion can be repeated for the variables we use to describe the elasticity equation. Here, for the extended variables, we have

          \begin{align*}
   \tilde {\mathbf u} &\in U
    = \{\tilde {\mathbf u}|_{\Omega_s} \in H^1(\Omega_s)^d, \quad
        \tilde {\mathbf u}|_{\Omega_f} = 0 \}.
 \end{align*}

          Given that we have extended the fields by zero, we could in principle write the integrals over subdomains to the entire domain $\Omega$, though it is little additional effort to first ask whether a cell is part of the elastic or fluid region before deciding which terms to integrate. Actually integrating these terms is not very difficult; for the Stokes equations, the relevant steps have been shown in step-22, whereas for the elasticity equation we take essentially the form shown in the Handling vector valued problems topic (rather than the one from step-8).
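
          How do these extended-by-zero spaces translate into code? One way (and the way a program like this can be structured) is to pair the usual elements with FE_Nothing, an element with no degrees of freedom at all, and to let a single DoFHandler pick between two such combined elements cell by cell. The following is a minimal sketch, not a verbatim excerpt from the program; the element degrees and the use of the material id to tell the two subdomains apart are assumptions made for illustration:

  #include <deal.II/dofs/dof_handler.h>
  #include <deal.II/fe/fe_nothing.h>
  #include <deal.II/fe/fe_q.h>
  #include <deal.II/fe/fe_system.h>
  #include <deal.II/hp/fe_collection.h>

  using namespace dealii;

  template <int dim>
  void distribute_dofs_on_both_subdomains(DoFHandler<dim> &dof_handler)
  {
    // One element per subdomain; a field that is "extended by zero"
    // simply has no unknowns there, courtesy of FE_Nothing:
    const FESystem<dim> stokes_fe(FE_Q<dim>(2), dim,       // velocities
                                  FE_Q<dim>(1), 1,         // pressure
                                  FE_Nothing<dim>(), dim); // no displacements
    const FESystem<dim> elasticity_fe(FE_Nothing<dim>(), dim, // no velocities
                                      FE_Nothing<dim>(), 1,   // no pressure
                                      FE_Q<dim>(2), dim);     // displacements
    const hp::FECollection<dim> fe_collection(stokes_fe, elasticity_fe);

    // Select one of the two elements on each cell; using the material id
    // for the fluid/solid split is a convention assumed for this sketch:
    for (const auto &cell : dof_handler.active_cell_iterators())
      cell->set_active_fe_index(cell->material_id() == 0 ? 0 : 1);
    dof_handler.distribute_dofs(fe_collection);
  }

          With this setup, the velocity and pressure fields automatically vanish on solid cells simply because no unknowns exist for them there, and vice versa for the displacements.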

          The term that is of more interest is the interface term,

          \[
         -(\mathbf b_h,
           (2 \eta \varepsilon(\mathbf v_h) - p_h \mathbf 1)
            \mathbf n)_{\partial K \cap \Gamma_i}.
 \]

          Although it isn't immediately obvious, this term presents a slight complication: while $\psi_i[\mathbf u]$ and $\mathbf n$ are evaluated on the solid side of the interface (they are test functions for the displacement and the normal vector to $\Omega_s$, respectively), we need to evaluate $\psi_j[\mathbf v],\psi_j[p]$ on the fluid side of the interface since they correspond to the stress/force exerted by the fluid. In other words, in our implementation, we will need FEFaceValues objects for both sides of the interface. To make things slightly worse, we may also have to deal with the fact that one side or the other may be refined, leaving us with the need to integrate over parts of a face. Take a look at the implementation below on how to deal with this.

          As an additional complication, the matrix entries that result from this term need to be added to the sparsity pattern of the matrix somehow. This is the realm of various functions in the DoFTools namespace like DoFTools::make_sparsity_pattern and DoFTools::make_flux_sparsity_pattern. Essentially, what these functions do is simulate what happens during assembly of the system matrix: whenever assembly would write a nonzero entry into the global matrix, the functions in DoFTools would add an entry to the sparsity pattern. We could therefore do the following: let DoFTools::make_sparsity_pattern add all those entries to the sparsity pattern that arise from the regular cell-by-cell integration, and then add by hand those entries that arise from the interface terms. If you look at the implementation of the interface integrals in the program below, it should be obvious how to do that and would require maybe 100 lines of code at most.

          But we're lazy people: the interface term couples degrees of freedom from two adjacent cells along a face, which is exactly the kind of thing one would do in discontinuous Galerkin schemes for which the function DoFTools::make_flux_sparsity_pattern was written. This is a superset of matrix entries compared to the usual DoFTools::make_sparsity_pattern: it will also add all entries that result from computing terms coupling the degrees of freedom from both sides of all faces. Unfortunately, for the simplest version of this function, this is a pretty big superset. Consider for example the following mesh with two cells and a $Q_1$ finite element:

          2---3---5
          |   |   |
          0---1---4
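
          In code, the lazy route named above is short. The following is a minimal sketch using the standard deal.II names (DoFTools also offers overloads of make_flux_sparsity_pattern() taking coupling masks, which can be used to trim the superset of entries down again):

  #include <deal.II/dofs/dof_handler.h>
  #include <deal.II/dofs/dof_tools.h>
  #include <deal.II/lac/dynamic_sparsity_pattern.h>
  #include <deal.II/lac/sparsity_pattern.h>

  using namespace dealii;

  template <int dim>
  void build_sparsity(const DoFHandler<dim> &dof_handler,
                      SparsityPattern       &sparsity_pattern)
  {
    DynamicSparsityPattern dsp(dof_handler.n_dofs(), dof_handler.n_dofs());
    // Superset of make_sparsity_pattern(): additionally allocates the
    // entries that couple degrees of freedom across each face, which in
    // particular covers the interface terms discussed above.
    DoFTools::make_flux_sparsity_pattern(dof_handler, dsp);
    sparsity_pattern.copy_from(dsp);
  }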

          Possibilities for extensions

          Linear solvers and preconditioners

          An obvious place to improve the program would be to use a more sophisticated solver — in particular one that scales well and will also work for realistic 3d problems. This shouldn't actually be too hard to achieve here, because of the one-way coupling from fluid into solid. To this end, assume we had re-ordered degrees of freedom in such a way that we first have all velocity and pressure degrees of freedom, and then all displacements (this is easily possible using DoFRenumbering::component_wise). Then the system matrix could be split into the following block form:

          \[
   A_\text{global}
   =
   \begin{pmatrix}
     A_{\text{fluid}} & 0 \\
     B & A_{\text{solid}}
   \end{pmatrix}
 \]

          where $A_{\text{fluid}}$ is the Stokes matrix for velocity and pressure (it could be further subdivided into a $2\times 2$ matrix as in step-22, though this is immaterial for the current purpose), $A_{\text{solid}}$ results from the elasticity equations for the displacements, and $B$ is the matrix that comes from the interface conditions. Now notice that the matrix

          \[
   A_\text{global}^{-1}
   =
   \begin{pmatrix}
     A_\text{fluid}^{-1} & 0 \\
     -A_\text{solid}^{-1} B
       A_\text{fluid}^{-1} & A_{\text{solid}}^{-1}
   \end{pmatrix}
 \]

          is the inverse of $A_\text{global}$. Applying this matrix requires only one solve with $A_\text{fluid}$ and $A_\text{solid}$ each since

          \[
   \begin{pmatrix}
     p_x \\ p_y
   \end{pmatrix}
   =
   A_\text{global}^{-1}
   \begin{pmatrix}
     x \\ y
   \end{pmatrix}
 \]

          can be computed as $p_x = A_{\text{fluid}}^{-1} x$ followed by $p_y = A_{\text{solid}}^{-1} (y-Bp_x)$.
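
          Applying this block-triangular inverse therefore amounts to two (approximate) inner solves and one matrix-vector product. A minimal sketch under stated assumptions: fluid_inv and solid_inv are placeholders for whatever approximations of $A_\text{fluid}^{-1}$ and $A_\text{solid}^{-1}$ one has chosen, exposing the usual deal.II vmult() interface, and B is the interface coupling matrix:

  // Compute (p_x, p_y) = ~A_global^{-1} (x, y) by forward substitution.
  template <typename Matrix, typename FluidInverse, typename SolidInverse,
            typename Vector>
  void precondition_block_triangular(const FluidInverse &fluid_inv,
                                     const SolidInverse &solid_inv,
                                     const Matrix       &B,
                                     const Vector &x, const Vector &y,
                                     Vector &p_x, Vector &p_y)
  {
    // First block row: p_x = ~A_fluid^{-1} x
    fluid_inv.vmult(p_x, x);

    // Second block row: p_y = ~A_solid^{-1} (y - B p_x)
    Vector tmp(y);
    Vector B_px(y);
    B.vmult(B_px, p_x);
    tmp -= B_px;
    solid_inv.vmult(p_y, tmp);
  }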

          One can therefore expect that

          \[
   \widetilde{A_\text{global}^{-1}}
   =
   \begin{pmatrix}
     \widetilde{A_\text{fluid}^{-1}} & 0 \\
     -\widetilde{A_\text{solid}^{-1}} B
       \widetilde{A_\text{fluid}^{-1}} & \widetilde{A_{\text{solid}}^{-1}}
   \end{pmatrix}
 \]

          would be a good preconditioner if $\widetilde{A_{\text{fluid}}^{-1}}
 \approx A_{\text{fluid}}^{-1}$ and $\widetilde{A_{\text{solid}}^{-1}}
 \approx A_{\text{solid}}^{-1}$.

          That means, we only need good preconditioners for Stokes and the elasticity equations separately. These are well known: for Stokes, we can use the preconditioner discussed in the results section of step-22; for elasticity, a good preconditioner would be a single V-cycle of a geometric or algebraic multigrid. There are more open questions, however: For an "optimized" block-triangular preconditioner built from two sub-preconditioners, one point that often comes up is that, when choosing parameters for the sub-preconditioners, values that work well when solving the two problems separately may not be optimal when combined into a multiphysics preconditioner. In particular, when solving just a solid or fluid mechanics problem separately, the balancing act between the number of iterations to convergence and the cost of applying the preconditioner on a per iteration basis may lead one to choose an expensive preconditioner for the Stokes problem and a cheap preconditioner for the elasticity problem (or vice versa). When combined, however, there is the additional constraint that you want the two sub-preconditioners to converge at roughly the same rate, or else the cheap one may drive up the global number of iterations while the expensive one drives up the cost-per-iteration. For example, while a single AMG V-cycle is a good approach for elasticity by itself, when combined into a multiphysics problem there may be an incentive to use a full W-cycle or multiple cycles to help drive down the total solve time.

          Refinement indicators

          As mentioned in the introduction, the refinement indicator we use for this program is rather ad hoc. A better one would understand that the jump in the gradient of the solution across the interface is not indicative of the error but is to be expected, and would therefore ignore the interface when integrating the jump terms. Nevertheless, this is not what the KellyErrorEstimator class does. Another, bigger question, is whether this kind of estimator is a good strategy in the first place: for example, if we want to have maximal accuracy in one particular aspect of the displacement (e.g. the displacement at the top right corner of the solid), then is it appropriate to scale the error indicators for fluid and solid to the same magnitude? Maybe it is necessary to solve the fluid problem with more accuracy than the solid because the fluid solution directly affects the solid's solution? Maybe the other way around?

          /usr/share/doc/packages/dealii/doxygen/deal.II/step_47.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_47.html 2024-12-27 18:25:21.212958938 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_47.html 2024-12-27 18:25:21.216958965 +0000 @@ -152,53 +152,53 @@

          The first author would like to acknowledge the support of NSF Grant No. DMS-1520862. Timo Heister and Wolfgang Bangerth acknowledge support through NSF awards DMS-1821210, EAR-1550901, and OAC-1835673.

          Introduction

          This program deals with the biharmonic equation,

          \begin{align*}
   \Delta^2 u(\mathbf x) &= f(\mathbf x)
   \qquad \qquad &&\forall \mathbf x \in \Omega.
 \end{align*}

          This equation appears in the modeling of thin structures such as roofs of stadiums. These objects are of course in reality three-dimensional with a large aspect ratio of lateral extent to perpendicular thickness, but one can often very accurately model these structures as two-dimensional by making assumptions about how internal forces vary in the perpendicular direction. These assumptions lead to the equation above.

          The model typically comes in two different kinds, depending on what kinds of boundary conditions are imposed. The first case,

          \begin{align*}
   u(\mathbf x) &= g(\mathbf x) \qquad \qquad
   &&\forall \mathbf x \in \partial\Omega, \\
   \Delta u(\mathbf x) &= h(\mathbf x) \qquad \qquad
   &&\forall \mathbf x \in \partial\Omega,
 \end{align*}

          corresponds to the edges of the thin structure attached to the top of a wall of height $g(\mathbf x)$ in such a way that the bending forces that act on the structure are $h(\mathbf x)$; in most physical situations, one will have $h=0$, corresponding to the structure simply sitting atop the wall.

          In the second possible case of boundary values, one would have

          \begin{align*}
   u(\mathbf x) &= g(\mathbf x) \qquad \qquad
   &&\forall \mathbf x \in \partial\Omega, \\
   \frac{\partial u(\mathbf x)}{\partial \mathbf n} &= j(\mathbf x) \qquad \qquad
   &&\forall \mathbf x \in \partial\Omega.
 \end{align*}

          This corresponds to a "clamped" structure for which a nonzero $j(\mathbf x)$ implies a certain angle against the horizontal.

          As with Dirichlet and Neumann boundary conditions for the Laplace equation, it is of course possible to have one kind of boundary conditions on one part of the boundary, and the other on the remainder.

          What's the issue?

          The fundamental issue with the equation is that it takes four derivatives of the solution. In the case of the Laplace equation we treated in step-3, step-4, and several other tutorial programs, one multiplies by a test function, integrates, integrates by parts, and ends up with only one derivative on both the test function and trial function – something one can do with functions that are continuous globally, but may have kinks at the interfaces between cells: The derivative may not be defined at the interfaces, but that is on a lower-dimensional manifold (and so doesn't show up in the integrated value).

          But for the biharmonic equation, if one followed the same procedure using integrals over the entire domain (i.e., the union of all cells), one would end up with two derivatives on the test functions and trial functions each. If one were to use the usual piecewise polynomial functions with their kinks on cell interfaces, the first derivative would yield a discontinuous gradient, and the second derivative would yield delta functions on the interfaces – but because both the second derivatives of the test functions and of the trial functions yield a delta function, we would try to integrate the product of two delta functions. For example, in 1d, where $\varphi_i$ are the usual piecewise linear "hat functions", we would get integrals of the sort

          \begin{align*}
   \int_0^L (\Delta \varphi_i) (\Delta \varphi_j)
   =
   \int_0^L
   \frac 1h \left[\delta(x-x_{i-1}) - 2\delta(x-x_i) + \delta(x-x_{i+1})\right]
   \frac 1h \left[\delta(x-x_{j-1}) - 2\delta(x-x_j) + \delta(x-x_{j+1})\right]
 \end{align*}

          where $x_i$ is the node location at which the shape function $\varphi_i$ is defined, and $h$ is the mesh size (assumed uniform). The problem is that delta functions in integrals are defined using the relationship

          \begin{align*}
   \int_0^L \delta(x-\hat x) f(x) \; dx
   =
   f(\hat x).
 \end{align*}

          But that only works if (i) $f(\cdot)$ is actually well defined at $\hat x$, and (ii) if it is finite. On the other hand, an integral of the form

          \begin{align*}
 \int_0^L \delta(x-x_i) \delta (x-x_i)
 \end{align*}

          does not make sense. Similar reasoning can be applied for 2d and 3d situations.

          In other words: This approach of trying to integrate over the entire domain and then integrating by parts can't work.


          then the following trick works (at least if the domain is convex, see below): In the same way as we obtained the mixed Laplace equation of step-20 from the regular Laplace equation by introducing a second variable, we can here introduce a variable $v=\Delta u$ and can then replace the equations above by the following, "mixed" system:

          \begin{align*}
   -\Delta u(\mathbf x) +v(\mathbf x) &= 0
   \qquad \qquad &&\forall \mathbf x \in \Omega, \\
   -\Delta v(\mathbf x) &= -f(\mathbf x)
   \qquad \qquad &&\forall \mathbf x \in \Omega, \\
   u(\mathbf x) &= g(\mathbf x) \qquad \qquad
   &&\forall \mathbf x \in \partial\Omega, \\
   v(\mathbf x) &= h(\mathbf x) \qquad \qquad
   &&\forall \mathbf x \in \partial\Omega.
 \end{align*}

          In other words, we end up with what is in essence a system of two coupled Laplace equations for $u,v$, each with Dirichlet-type boundary conditions. We know how to solve such problems, and it should not be very difficult to construct good solvers and preconditioners for this system using either the techniques of step-20 or step-22. So this case is pretty simple to deal with.

          Note
          It is worth pointing out that this only works for domains whose boundary has corners if the domain is also convex – in other words, if there are no re-entrant corners. This sounds like a rather random condition, but it makes sense in view of the following two facts: The solution of the original biharmonic equation must satisfy $u\in H^2(\Omega)$. On the other hand, the mixed system reformulation above suggests that both $u$ and $v$ satisfy $u,v\in H^1(\Omega)$ because both variables only solve a Poisson equation. In other words, if we want to ensure that the solution $u$ of the mixed problem is also a solution of the original biharmonic equation, then we need to be able to somehow guarantee that the solution of $-\Delta u=v$ is in fact more smooth than just $H^1(\Omega)$. This can be argued as follows: For convex domains, "elliptic regularity" implies that if the right hand side $v\in H^s$, then $u\in H^{s+2}$ if the boundary is smooth enough. (This could also be guaranteed if the domain boundary is sufficiently smooth – but domains whose boundaries have no corners are not very practical in real life.) We know that $v\in H^1$ because it solves the equation $-\Delta v=f$, but we are still left with the condition on convexity of the boundary; one can show that polygonal, convex domains are good enough to guarantee that $u\in H^2$ in this case (smoothly bounded, convex domains would result in $u\in H^3$, but we don't need this much regularity). On the other hand, if the domain is not convex, we cannot guarantee that the solution of the mixed system is in $H^2$, and consequently may obtain a solution that can't be equal to the solution of the original biharmonic equation.

          The more complicated situation is if we have the "clamped" boundary conditions, i.e., if the equation looks like this:

          \begin{align*}
   \Delta^2 u(\mathbf x) &= f(\mathbf x)
   \qquad \qquad &&\forall \mathbf x \in \Omega, \\
   u(\mathbf x) &= g(\mathbf x) \qquad \qquad
   &&\forall \mathbf x \in \partial\Omega, \\
   \frac{\partial u(\mathbf x)}{\partial \mathbf n} &= j(\mathbf x) \qquad \qquad
   &&\forall \mathbf x \in \partial\Omega.
 \end{align*}

          The same trick with the mixed system does not work here, because we would end up with both Dirichlet and Neumann boundary conditions for $u$, but none for $v$.

          The solution to this conundrum arrived with the Discontinuous Galerkin method wave in the 1990s and early 2000s: In much the same way as one can use discontinuous shape functions for the Laplace equation by penalizing the size of the discontinuity to obtain a scheme for an equation that has one derivative on each shape function, we can use a scheme that uses continuous (but not $C^1$ continuous) shape functions and penalize the jump in the derivative to obtain a scheme for an equation that has two derivatives on each shape function. In analogy to the Interior Penalty (IP) method for the Laplace equation, this scheme for the biharmonic equation is typically called the $C^0$ IP (or C0IP) method, since it uses $C^0$ (continuous but not continuously differentiable) shape functions with an interior penalty formulation.

          It is worth noting that the C0IP method is not the only one that has been developed for the biharmonic equation. step-82 shows an alternative method.

          Derivation of the C0IP method

          We base this program on the $C^0$ IP method presented by Susanne Brenner and Li-Yeng Sung in the paper "$C^0$ Interior Penalty Methods for Fourth Order Elliptic Boundary Value Problems on Polygonal Domains" [Brenner2005], where the method is derived for the biharmonic equation with "clamped" boundary conditions.

          As mentioned, this method relies on the use of $C^0$ Lagrange finite elements where the $C^1$ continuity requirement is relaxed and has been replaced with interior penalty techniques. To derive this method, we consider a $C^0$ shape function $v_h$ which vanishes on $\partial\Omega$. We introduce notation $ \mathbb{F} $ as the set of all faces of $\mathbb{T}$, $ \mathbb{F}^b $ as the set of boundary faces, and $ \mathbb{F}^i $ as the set of interior faces for use further down below. Since the higher order derivatives of $v_h$ have two values on each interface $e\in \mathbb{F}$ (shared by the two cells $K_{+},K_{-} \in \mathbb{T}$), we cope with this discontinuity by defining the following single-valued functions on $e$:

          \begin{align*}
   \jump{\frac{\partial^k v_h}{\partial \mathbf n^k}}
   &=
   \frac{\partial^k v_h|_{K_+}}{\partial \mathbf n^k} \bigg |_e
   - \frac{\partial^k v_h|_{K_-}}{\partial \mathbf n^k} \bigg |_e,
   \\
   \average{\frac{\partial^k v_h}{\partial \mathbf n^k}}
   &=
   \frac{1}{2} \bigg( \frac{\partial^k v_h|_{K_+}}{\partial \mathbf n^k} \bigg |_e
   + \frac{\partial^k v_h|_{K_-}}{\partial \mathbf n^k} \bigg |_e \bigg )
 \end{align*}

          for $k =1,2$ (i.e., for the gradient and the matrix of second derivatives), and where $\mathbf n$ denotes a unit vector normal to $e$ pointing from $K_+$ to $K_-$. In the literature, these functions are referred to as the "jump" and "average" operations, respectively.

          To obtain the $C^0$ IP approximation $u_h$, we left multiply the biharmonic equation by $v_h$, and then integrate over $\Omega$. As explained above, we can't do the integration by parts on all of $\Omega$ with these shape functions, but we can do it on each cell individually since the shape functions are just polynomials on each cell. Consequently, we start by using the following integration-by-parts formula on each mesh cell $K \in {\mathbb{T}}$:

          \begin{align*}
   \int_K v_h (\Delta^2 w_h)
   &= \int_K v_h (\nabla\cdot\nabla) (\Delta w_h)
   \\
   &= -\int_K \nabla v_h \cdot (\nabla \Delta w_h)
      +\int_{\partial K} v_h (\nabla \Delta w_h \cdot \mathbf n)
 \end{align*}

          Then, after making cancellations that arise, we arrive at the following C0IP formulation of the biharmonic equation: find $u_h$ such that $u_h = g$ on $\partial \Omega$ and

          \begin{align*}
 \mathcal{A}(v_h,u_h)&=\mathcal{F}(v_h) \quad \text{holds for all test functions } v_h,
 \end{align*}

          Here, $\gamma$ is the penalty parameter which both weakly enforces the boundary condition

          \begin{align*}
 \frac{\partial u(\mathbf x)}{\partial \mathbf n} = j(\mathbf x)
 \end{align*}

          on the boundary interfaces $e \in \mathbb{F}^b$, and also ensures that in the limit $h\rightarrow 0$, $u_h$ converges to a $C^1$ continuous function. $\gamma$ is chosen to be large enough to guarantee the stability of the method. We will discuss our choice in the program below.

          Convergence Rates

          On polygonal domains, the weak solution $u$ to the biharmonic equation lives in $H^{2 +\alpha}(\Omega)$ where $\alpha \in(1/2, 2]$ is determined by the interior angles at the corners of $\Omega$. For instance, whenever $\Omega$ is convex, $\alpha=1$; $\alpha$ may be less than one if the domain has re-entrant corners, but $\alpha$ is close to $1$ if one of the interior angles is close to $\pi$.

          Now suppose that the $C^0$ IP solution $u_h$ is approximated by $C^0$ shape functions with polynomial degree $p \ge 2$. Then the discretization outlined above yields the convergence rates as discussed below.

          Convergence in the $C^0$ IP-norm

          Ideally, we would like to measure convergence in the "energy norm" $\|D^2(u-u_h)\|$. However, this does not work because, again, the discrete solution $u_h$ does not have two (weak) derivatives. Instead, one can define a discrete ( $C^0$ IP) seminorm that is "equivalent" to the energy norm, as follows:

          \begin{align*}
  |u_h|_{h}^2 :=
  \sum\limits_{K \in \mathbb{T}} \big|u_h\big|_{H^2(K)}^2
  +
  \sum\limits_{e \in \mathbb{F}}
  \frac{\gamma}{h_e}
  \left\| \jump{\frac{\partial u_h}{\partial \mathbf n}} \right\|_{L_2(e)}^2.
 \end{align*}

          much as one would expect given the convergence rates we know are true for the usual discretizations of the Laplace equation.

          Of course, this is true only if the exact solution is sufficiently smooth. Indeed, if $f \in H^m(\Omega)$ with $m \ge 0$, $u \in H^{2+\alpha}(\Omega)$ where $ 2 < 2+\alpha  \le m+4$, then the convergence rate of the $C^0$ IP method is $\mathcal{O}(h^{\min\{p-1, \alpha\}})$. In other words, the optimal convergence rate can only be expected if the solution is so smooth that $\alpha\ge p-1$; this can only happen if (i) the domain is convex with a sufficiently smooth boundary, and (ii) $m\ge p-3$. In practice, of course, the solution is what it is (independent of the polynomial degree we choose), and the last condition can then equivalently be read as saying that there is definitely no point in choosing $p$ large if $m$ is not also large. In other words, the only reasonable choices for $p$ are $p\le m+3$ because larger polynomial degrees do not result in higher convergence orders.

          For the purposes of this program, we're a bit too lazy to actually implement this equivalent seminorm – though it's not very difficult and would make for a good exercise. Instead, we'll simply check in the program what the "broken" $H^2$ seminorm

          \begin{align*}
  |u_h|^\circ_{H^2}
  :=
  \sum\limits_{K \in \mathbb{T}} \big|u_h\big|_{H^2(K)}^2
 \end{align*}

          yields. The convergence rate in this norm can, from a theoretical perspective, of course not be worse than the one for $|\cdot|_h$ because it contains only a subset of the necessary terms, but it could at least conceivably be better. It could also be the case that we get the optimal convergence rate even though there is a bug in the program, and that that bug would only show up in sub-optimal rates for the additional terms present in $|\cdot|_h$. But, one might hope that if we get the optimal rate in the broken norm and the norms discussed below, then the program is indeed correct. The results section will demonstrate that we obtain optimal rates in all norms shown.

          Convergence in the $L_2$-norm

          The optimal convergence rate in the $L_2$-norm is $\mathcal{O}(h^{p+1})$ provided $p \ge 3$. More details can be found in Theorem 4.6 of [Engel2002] .

          The default in the program below is to choose $p=2$. In that case, the theorem does not apply, and indeed one only gets $\mathcal{O}(h^2)$ instead of $\mathcal{O}(h^3)$ as we will show in the results section.

          Convergence in the $H^1$-seminorm

          Given that we expect $\mathcal{O}(h^{p-1})$ in the best of cases for a norm equivalent to the $H^2$ seminorm, and $\mathcal{O}(h^{p+1})$ for the $L_2$ norm, one may ask about what happens in the $H^1$ seminorm that is intermediate to the two others. A reasonable guess is that one should expect $\mathcal{O}(h^{p})$. There is probably a paper somewhere that proves this, but we also verify that this conjecture is experimentally true below.

          Other Boundary Conditions

          We remark that the derivation of the $C^0$ IP method for the biharmonic equation with other boundary conditions – for instance, for the first set of boundary conditions, namely $u(\mathbf x) = g(\mathbf x)$ and $\Delta u(\mathbf x)= h(\mathbf x)$ on $\partial\Omega$ – can be obtained with suitable modifications to $\mathcal{A}(\cdot,\cdot)$ and $\mathcal{F}(\cdot)$ described in the book chapter [Brenner2011] .

          The testcase

          The last step that remains to describe is what this program solves for. As always, a trigonometric function is both a good and a bad choice because it does not lie in any polynomial space in which we may seek the solution while at the same time being smoother than real solutions typically are (here, it is in $C^\infty$ while real solutions are typically only in $H^3$ or so on convex polygonal domains, or somewhere between $H^2$ and $H^3$ if the domain is not convex). But, since we don't have the means to describe solutions of realistic problems in terms of relatively simple formulas, we just go with the following, on the unit square for the domain $\Omega$:

          \begin{align*}
   u = \sin(\pi x) \sin(\pi y).
 \end{align*}

            copy_data_face.cell_matrix.reinit(n_interface_dofs, n_interface_dofs);
           

          The second part deals with determining what the penalty parameter should be. By looking at the units of the various terms in the bilinear form, it is clear that the penalty has to have the form $\frac{\gamma}{h_K}$ (i.e., one over length scale), but it is not a priori obvious how one should choose the dimension-less number $\gamma$. From the discontinuous Galerkin theory for the Laplace equation, one might conjecture that $\gamma=p(p+1)$ is the right choice, where $p$ is the polynomial degree of the finite element used. We will discuss this choice in a bit more detail in the results section of this program.

        In the formula above, $h_K$ is the size of cell $K$. But this is not quite so straightforward either: If one uses highly stretched cells, then a more involved theory says that $h$ should be replaced by the diameter of cell $K$ normal to the direction of the edge in question. It turns out that there is a function in deal.II for that. Secondly, $h_K$ may be different when viewed from the two different sides of a face.

        To stay on the safe side, we take the maximum of the two values. We will note that it is possible that this computation has to be further adjusted if one were to use hanging nodes resulting from adaptive mesh refinement.

          const unsigned int p = fe.degree;
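          // A sketch of how this computation might continue, mirroring the
          // description above rather than quoting the program verbatim; cell,
          // ncell, f, and nf are assumed to denote the two cells and their
          // local face numbers adjacent to the current interior face:
          const double gamma_over_h =
            std::max((1.0 * p * (p + 1)) /
                       cell->extent_in_direction(
                         GeometryInfo<dim>::unit_normal_direction[f]),
                     (1.0 * p * (p + 1)) /
                       ncell->extent_in_direction(
                         GeometryInfo<dim>::unit_normal_direction[nf]));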
          exact_solution.gradient_list(q_points, exact_gradients);
         
         
        On the positive side, because we now only deal with one cell adjacent to the face (as we are on the boundary), the computation of the penalty factor $\gamma$ is substantially simpler:

          const unsigned int p = fe.degree;
        /usr/share/doc/packages/dealii/doxygen/deal.II/step_48.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_48.html 2024-12-27 18:25:21.272959350 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_48.html 2024-12-27 18:25:21.280959405 +0000 @@ -172,7 +172,7 @@

        Apart from the fact that we avoid solving linear systems with this type of elements when using explicit time-stepping, they come with two other advantages. When we are using the sum-factorization approach to evaluate the finite element operator (cf. step-37), we have to evaluate the function at the quadrature points. In the case of Gauss-Lobatto elements, where quadrature points and node points of the finite element coincide, this operation is trivial since the value of the function at the quadrature points is given by its one-dimensional coefficients. In this way, the arithmetic work for the finite element operator evaluation is reduced by approximately a factor of two compared to the generic Gaussian quadrature.

        To sum up the discussion, by using the right finite element and quadrature rule combination, we end up with a scheme where we only need to compute the right hand side vector corresponding to the formulation above and then multiply it by the inverse of the diagonal mass matrix in each time step. In practice, of course, we extract the diagonal elements and invert them only once at the beginning of the program.
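
        Expressed in code, the element/quadrature combination described above looks like the following minimal sketch (the degree is illustrative):

  #include <deal.II/base/quadrature_lib.h>
  #include <deal.II/fe/fe_q.h>

  using namespace dealii;

  constexpr unsigned int dim       = 2; // illustrative
  constexpr unsigned int fe_degree = 4; // illustrative

  // An element whose node points are the Gauss-Lobatto points, and a
  // quadrature formula built on those very same points: node and
  // quadrature points coincide, so the mass matrix comes out diagonal.
  FE_Q<dim>        fe(QGaussLobatto<1>(fe_degree + 1));
  QGaussLobatto<1> quadrature_1d(fe_degree + 1); // tensorized where needed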

        Implementation of constraints

        The usual way to handle constraints in deal.II is to use the AffineConstraints class that builds a sparse matrix storing information about which degrees of freedom (DoF) are constrained and how they are constrained. This format uses an unnecessarily large amount of memory since there are not so many different types of constraints: for example, in the case of hanging nodes when using linear finite elements on every cell, most constraints have the form $x_k = \frac 12 x_i + \frac 12 x_j$ where the coefficients $\frac 12$ are always the same and only $i,j,k$ are different. While storing this redundant information is not a problem in general because it is only needed once during matrix and right hand side assembly, it becomes a bottleneck in the matrix-free approach since there this information has to be accessed every time we apply the operator, and the remaining components of the operator evaluation are so fast. Thus, instead of an AffineConstraints object, MatrixFree uses a variable that we call constraint_pool that collects the weights of the different constraints. Then, only an identifier of each constraint in the mesh instead of all the weights have to be stored. Moreover, the constraints are not applied in a pre- and postprocessing step but rather as we evaluate the finite element operator. Therefore, the constraint information is embedded into the variable indices_local_to_global that is used to extract the cell information from the global vector. If a DoF is constrained, the indices_local_to_global variable contains the global indices of the DoFs that it is constrained to. Then, we have another variable constraint_indicator at hand that holds, for each cell, the local indices of DoFs that are constrained as well as the identifier of the type of constraint. Fortunately, you will not see these data structures in the example program since the class FEEvaluation takes care of the constraints without user interaction.

        In the presence of hanging nodes, the diagonal mass matrix obtained on the element level via the Gauss-Lobatto quadrature/node point procedure does not directly translate to a diagonal global mass matrix, as following the constraints on rows and columns would also add off-diagonal entries. As explained in Kormann (2016), interpolating constraints on a vector, which maintains the diagonal shape of the mass matrix, is consistent with the equations up to an error of the same magnitude as the quadrature error. In the program below, we will simply assemble the diagonal of the mass matrix as if it were a vector to enable this approximation.

        Parallelization

        The MatrixFree class comes with the option to be parallelized on three levels: MPI parallelization on clusters of distributed nodes, thread parallelization scheduled by the Threading Building Blocks library, and finally with a vectorization by working on a batch of two (or more) cells via SIMD data type (sometimes called cross-element or external vectorization). As we have already discussed in step-37, you will get best performance by using an instruction set specific to your system, e.g. with the cmake variable -DCMAKE_CXX_FLAGS="-march=native". The MPI parallelization was already exploited in step-37. Here, we additionally consider thread parallelization with TBB. This is fairly simple, as all we need to do is to tell the initialization of the MatrixFree object about the fact that we want to use a thread parallel scheme through the variable MatrixFree::AdditionalData::thread_parallel_scheme. During setup, a dependency graph is set up similar to the one described in the workstream_paper , which allows to schedule the work of the local_apply function on chunks of cells without several threads accessing the same vector indices. As opposed to the WorkStream loops, some additional clever tricks to avoid global synchronizations as described in Kormann and Kronbichler (2011) are also applied.
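
        A minimal sketch of switching this on when setting up a MatrixFree object follows; note that in recent deal.II releases the AdditionalData member is spelled tasks_parallel_scheme, so the exact name should be checked against the version at hand:

  #include <deal.II/base/quadrature_lib.h>
  #include <deal.II/dofs/dof_handler.h>
  #include <deal.II/fe/mapping_q1.h>
  #include <deal.II/lac/affine_constraints.h>
  #include <deal.II/matrix_free/matrix_free.h>

  using namespace dealii;

  template <int dim>
  void reinit_matrix_free(MatrixFree<dim, double>         &matrix_free,
                          const DoFHandler<dim>           &dof_handler,
                          const AffineConstraints<double> &constraints)
  {
    typename MatrixFree<dim, double>::AdditionalData additional_data;
    // Let MatrixFree build the TBB dependency graph so that no two
    // threads ever write to the same vector entries concurrently:
    additional_data.tasks_parallel_scheme =
      MatrixFree<dim, double>::AdditionalData::partition_partition;

    matrix_free.reinit(MappingQ1<dim>(),
                       dof_handler,
                       constraints,
                       QGaussLobatto<1>(dof_handler.get_fe().degree + 1),
                       additional_data);
  }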

        /usr/share/doc/packages/dealii/doxygen/deal.II/step_49.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_49.html 2024-12-27 18:25:21.340959817 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_49.html 2024-12-27 18:25:21.348959872 +0000 @@ -450,7 +450,7 @@

        grid_3: Moving vertices

        In this function, we move vertices of a mesh. This is simpler than one usually expects: if you ask a cell using cell->vertex(i) for the coordinates of its ith vertex, it doesn't just provide the location of this vertex but in fact a reference to the location where these coordinates are stored. We can then modify the value stored there.

        So this is what we do in the first part of this function: We create a square of geometry $[-1,1]^2$ with a circular hole with radius 0.25 located at the origin. We then loop over all cells and all vertices and if a vertex has a $y$ coordinate equal to one, we move it upward by 0.5.

        Note that this sort of procedure does not usually work this way because one will typically encounter the same vertices multiple times and may move them more than once. It works here because we select the vertices we want to use based on their geometric location, and a vertex moved once will fail this test in the future. A more general approach to this problem would have been to keep a std::set of those vertex indices that we have already moved (which we can obtain using cell->vertex_index(i)) and only move those vertices whose index isn't in the set yet.
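
        A minimal sketch of that more general bookkeeping approach (hypothetical, not part of the program below), here shifting every vertex of the mesh upward exactly once even though each vertex is reachable from several adjacent cells:

  #include <deal.II/grid/tria.h>
  #include <set>

  using namespace dealii;

  template <int dim>
  void shift_vertices_once(Triangulation<dim> &triangulation, const double dy)
  {
    std::set<unsigned int> visited_vertices;
    for (const auto &cell : triangulation.active_cell_iterators())
      for (const unsigned int v : cell->vertex_indices())
        if (visited_vertices.insert(cell->vertex_index(v)).second)
          cell->vertex(v)[dim - 1] += dy; // first visit: move the vertex
  }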

          void grid_3()
          {

        Possibilities for extensions

        Assigning different boundary ids

        It is often useful to assign different boundary ids to a mesh that is generated in one form or another as described in this tutorial to apply different boundary conditions.

        For example, you might want to apply a different boundary condition for the right boundary of the first grid in this program. To do this, iterate over the cells and their faces and identify the correct faces (for example using cell->center() to query the coordinates of the center of a cell as we do in step-1, or using cell->face(f)->get_boundary_id() to query the current boundary indicator of the $f$th face of the cell). You can then use cell->face(f)->set_boundary_id() to set the boundary id to something different. You can take a look back at step-1 how iteration over the meshes is done there.
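
        A minimal sketch of this procedure, assuming a Triangulation<2> object named triangulation in scope and picking, as illustrative choices, the right boundary at $x=1$ and the boundary id 1:

  // Mark all boundary faces whose center lies on the right edge so that
  // a different boundary condition can later be applied to them:
  for (const auto &cell : triangulation.active_cell_iterators())
    for (const auto &face : cell->face_iterators())
      if (face->at_boundary() && (std::abs(face->center()[0] - 1.0) < 1e-12))
        face->set_boundary_id(1);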

        Extracting a boundary mesh

        Computations on manifolds, like they are done in step-38, require a surface mesh embedded into a higher dimensional space. While some can be constructed using the GridGenerator namespace or loaded from a file, it is sometimes useful to extract a surface mesh from a volume mesh.

        Use the function GridGenerator::extract_boundary_mesh() to extract the surface elements of a mesh. Using the function on a 3d mesh (a Triangulation<3,3>, for example from grid_4()), this will return a Triangulation<2,3> that you can use in step-38. Also try extracting the boundary mesh of a Triangulation<2,2>.
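
        A minimal sketch of this (the choice of volume mesh is an arbitrary placeholder):

  #include <deal.II/grid/grid_generator.h>
  #include <deal.II/grid/tria.h>

  using namespace dealii;

  void make_surface_mesh()
  {
    Triangulation<3> volume_mesh;
    GridGenerator::hyper_ball(volume_mesh); // any 3d mesh will do

    Triangulation<2, 3> surface_mesh;       // a 2d mesh living in 3d space
    GridGenerator::extract_boundary_mesh(volume_mesh, surface_mesh);
  }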

        /usr/share/doc/packages/dealii/doxygen/deal.II/step_5.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_5.html 2024-12-27 18:25:21.380960091 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_5.html 2024-12-27 18:25:21.388960146 +0000

        \begin{align*}
  -\nabla \cdot \bigl(a(\mathbf x) \nabla u(\mathbf x)\bigr) &= f(\mathbf x)
  \qquad\qquad & \text{in}\ \Omega, \\
  u &= 0 \qquad\qquad & \text{on}\ \partial\Omega.
 \end{align*}

        If $a(\mathbf x)$ were a constant coefficient, this would simply be the Poisson equation that we have already solved in step-3 and step-4. However, if it is indeed spatially variable, it is a more complex equation (sometimes referred to as the "Poisson equation with a coefficient"). Specifically, we will here choose it as follows:

        \begin{align*}
   a(\mathbf x) =
   \begin{cases}
@@ -164,10 +164,10 @@
   \end{cases}
 \end{align*}
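The two cases of this definition are elided in the diff above; a coefficient function of this kind could look like the following sketch (the values 20 and 1 and the radius are illustrative, not necessarily those of the program):

  template <int dim>
  double coefficient(const Point<dim> &p)
  {
    if (p.square() < 0.5 * 0.5) // inside a disk of radius 1/2
      return 20.0;
    else
      return 1.0;
  }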

Depending on what the variable $u$ refers to, it models a variety of situations with wide applicability:

• If $u$ is the electric potential, then $-a\nabla u$ is the electric current in a medium and the coefficient $a$ is the conductivity of the medium at any given point. (In this situation, the right hand side of the equation would be the electric source density and would usually be zero or consist of localized, Delta-like, functions if specific points of the domain are connected to current sources that send electrons into or out of the domain.) In many media, $a=a(\mathbf x)$ is indeed spatially variable because the medium is not homogeneous. For example, in electrical impedance tomography, a biomedical imaging technique, one wants to image the body's interior by sending electric currents through the body between electrodes attached to the skin; in this case, $a(\mathbf x)$ describes the electrical conductivity of the different parts of the human body – so $a(\mathbf x)$ would be large for points $\mathbf x$ that lie in organs well supplied by blood (such as the heart), whereas it would be small for organs such as the lung that do not conduct electricity well (because air is a poor conductor). Similarly, if you are simulating an electronic device, $a(\mathbf x)$ would be large in parts of the volume occupied by conductors such as copper, gold, or aluminum; it would have intermediate values for parts of the volume occupied by semiconductors such as silicon; and it would be small in non-conducting and insulating parts of the volume (e.g., those occupied by air, or the circuit board on which the electronics are mounted).
• If we are describing the vertical deflection $u$ of a thin membrane under a vertical force $f$, then $a$ would be a measure of the local stiffness of the membrane, which can be spatially variable if the membrane is made from different materials, or if the thickness of the membrane varies spatially. This is the interpretation of the equation that will allow us to interpret the images shown in the results section below.

Since the Laplace/Poisson equation appears in so many contexts, there are of course many more uses than just the two listed above, each providing a different interpretation of what a spatially variable coefficient means in that context.

What you should have taken away from this is that equations with spatially variable coefficients in the differential operator are quite common, and indeed quite useful in describing the world around us. As a consequence, we should be able to reflect such cases in the numerical methods we use. It turns out that it is not entirely obvious how to deal with such spatially variable coefficients in finite difference methods (though it is also not too complicated to come up with ways to do that systematically). But we are using finite element methods, and for these it is entirely trivial to incorporate such coefficients: You just do what you always do, namely multiply by a test function, then integrate by parts. This yields the weak form, which here reads as follows:

/usr/share/doc/packages/dealii/doxygen/deal.II/step_50.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_50.html 2024-12-27 18:25:21.488960833 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_50.html 2024-12-27 18:25:21.496960888 +0000 @@ -167,14 +167,14 @@ (\epsilon \nabla u, \nabla v) = (f,v) \quad \forall v \in V_h \end{align*}

on the domain $\Omega = [-1,1]^\text{dim} \setminus [0,1]^\text{dim}$ (an L-shaped domain for 2D and a Fichera corner for 3D) with $\epsilon = 1$ if $\min(x,y,z)>-\frac{1}{2}$ and $\epsilon = 100$ otherwise. In other words, $\epsilon$ is small along the edges or faces of the domain that run into the reentrant corner, as will be visible in the figure below.
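A sketch of this coefficient as a free function (a hypothetical helper for illustration, not the program's actual code):

  template <int dim>
  double epsilon(const Point<dim> &p)
  {
    double min_coordinate = p(0);
    for (unsigned int d = 1; d < dim; ++d)
      min_coordinate = std::min(min_coordinate, p(d));
    return (min_coordinate > -0.5) ? 1.0 : 100.0;
  }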

The boundary conditions are $u=0$ on the whole boundary and the right-hand side is $f=1$. We use continuous $Q_2$ elements for the discrete finite element space $V_h$, and use a residual-based, cell-wise a posteriori error estimator $e(K) = e_{\text{cell}}(K) + e_{\text{face}}(K)$ from [karakashian2003posteriori] with

        \begin{align*}
  e_{\text{cell}}(K) &= h^2 \| f + \epsilon \triangle u \|_K^2, \\
  e_{\text{face}}(K) &= \sum_F h_F \| \jump{ \epsilon \nabla u \cdot n } \|_F^2,
 \end{align*}

to adaptively refine the mesh. (This is a generalization of the Kelly error estimator used in the KellyErrorEstimator class that drives mesh refinement in most of the other tutorial programs.) The following figure visualizes the solution and refinement for 2D: In 3D, the solution looks similar (see below). On the left you can see the solution and on the right we show a slice for $x$ close to the center of the domain showing the adaptively refined mesh.

[figure: $\bar u_h$ in 2d and $\bar u_h$ in 3d]

      @@ -184,7 +184,7 @@

      For the active mesh, we use the parallel::distributed::Triangulation class as done in step-40 which uses functionality in the external library p4est for the distribution of the active cells among processors. For the non-active cells in the multilevel hierarchy, deal.II implements what we will refer to as the "first-child rule" where, for each cell in the hierarchy, we recursively assign the parent of a cell to the owner of the first child cell. The following figures give an example of such a distribution. Here the left image represents the active cells for a sample 2D mesh partitioned using a space-filling curve (which is what p4est uses to partition cells); the center image gives the tree representation of the active mesh; and the right image gives the multilevel hierarchy of cells. The colors and numbers represent the different processors. The circular nodes in the tree are the non-active cells which are distributed using the "first-child rule".

      Included among the output to screen in this example is a value "Partition efficiency" given by one over MGTools::workload_imbalance(). This value, which will be denoted by $\mathbb{E}$, quantifies the overhead produced by not having a perfect work balance on each level of the multigrid hierarchy. This imbalance is evident from the example above: while level $\ell=2$ is about as well balanced as is possible with four cells among three processors, the coarse level $\ell=0$ has work for only one processor, and level $\ell=1$ has work for only two processors of which one has three times as much work as the other.

For defining $\mathbb{E}$, it is important to note that, as we are using local smoothing to define the multigrid hierarchy (see the multigrid paper for a description of local smoothing), the refinement level of a cell corresponds to that cell's multigrid level. Now, let $N_{\ell}$ be the number of cells on level $\ell$ (both active and non-active cells) and $N_{\ell,p}$ be the subset owned by process $p$. We will also denote by $P$ the total number of processors. Assuming that the workload for any one processor is proportional to the number of cells owned by that processor, the optimal workload per processor is given by

      \begin{align*}
 W_{\text{opt}} = \frac1{P}\sum_{\ell} N_{\ell} = \sum_{\ell}\left(\frac1{P}\sum_{p}N_{\ell,p}\right).
 \end{align*} @@ -217,7 +217,7 @@
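In this notation, and since the program reports one over MGTools::workload_imbalance(), the partition efficiency can be written as follows (a sketch of the definition; the implementation of that function is authoritative):

\begin{align*}
 W_{\ell} = \max_{p} N_{\ell,p}, \qquad
 W = \sum_{\ell} W_{\ell}, \qquad
 \mathbb{E} = \frac{W_{\text{opt}}}{W}.
\end{align*}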

      These sorts of considerations are considered in much greater detail in [clevenger_par_gmg], which contains a full discussion of the partition efficiency model and the effect the imbalance has on the GMG V-cycle timing. In summary, the value of $\mathbb{E}$ is highly dependent on the degree of local mesh refinement used and has an optimal value $\mathbb{E} \approx 1$ for globally refined meshes. Typically for adaptively refined meshes, the number of processors used to distribute a single mesh has a negative impact on $\mathbb{E}$ but only up to a leveling off point, where the imbalance remains relatively constant for an increasing number of processors, and further refinement has very little impact on $\mathbb{E}$. Finally, $1/\mathbb{E}$ was shown to give an accurate representation of the slowdown in parallel scaling expected for the timing of a V-cycle.

      It should be noted that there is potential for some asynchronous work between multigrid levels, specifically with purely nearest neighbor MPI communication, and an adaptive mesh could be constructed such that the efficiency model would far overestimate the V-cycle slowdown due to the asynchronous work "covering up" the imbalance (which assumes synchronization over levels). However, for most realistic adaptive meshes the expectation is that this asynchronous work will only cover up a very small portion of the imbalance and the efficiency model will describe the slowdown very well.

      Workload imbalance for algebraic multigrid methods

The considerations above show that one has to expect certain limits on the scalability of the geometric multigrid algorithm as it is implemented in deal.II because even in cases where the finest levels of a mesh are perfectly load balanced, the coarser levels may not be. At the same time, the coarser levels are weighted less (the contributions of $W_\ell$ to $W$ are small) because coarser levels have fewer cells and, consequently, do not contribute to the overall run time as much as finer levels. In other words, imbalances in the coarser levels may not lead to large effects in the big picture.

Algebraic multigrid methods are of course based on an entirely different approach to creating a hierarchy of levels. In particular, they create these purely based on analyzing the system matrix, and very sophisticated algorithms for ensuring that the problem is well load-balanced on every level are implemented in both the hypre and ML/MueLu packages that underlie the TrilinosWrappers::PreconditionAMG and PETScWrappers::PreconditionBoomerAMG classes. In some sense, these algorithms are simpler than for geometric multigrid methods because they only deal with the matrix itself, rather than all of the connotations of meshes, neighbors, parents, and other geometric entities. At the same time, much work has also been put into making algebraic multigrid methods scale to very large problems, including questions such as reducing the number of processors that work on a given level of the hierarchy to a subset of all processors, if otherwise processors would spend less time on computations than on communication. (One might note that it is of course possible to implement these same kinds of ideas also in geometric multigrid algorithms where one purposefully idles some processors on coarser levels to reduce the amount of communication. deal.II just doesn't do this at this time.)

      These are not considerations we typically have to worry about here, however: For most purposes, we use algebraic multigrid methods as black-box methods.

      Running the program

      @@ -1089,9 +1089,9 @@

      The result is a function that is similar to the one found in the "Use FEEvaluation::read_dof_values_plain() to avoid resolving constraints" subsection in the "Possibilities for extensions" section of step-37.

The reason for this function is that the MatrixFree operators do not take into account non-homogeneous Dirichlet constraints, instead treating all Dirichlet constraints as homogeneous. To account for this, the right-hand side here is assembled as the residual $r_0 = f-Au_0$, where $u_0$ is a zero vector except in the Dirichlet values. Then when solving, we have that the solution is $u = u_0 + A^{-1}r_0$. This can be seen as a Newton iteration on a linear system with initial guess $u_0$. The CG solve in the solve() function below computes $A^{-1}r_0$ and the call to constraints.distribute() (which directly follows) adds the $u_0$.

Obviously, since we are considering a problem with zero Dirichlet boundary values, we could have taken a similar approach as in step-37's assemble_rhs(), but this additional work allows us to change the problem declaration if we so choose.

This function has two parts in the integration loop: applying the negative of matrix $A$ to $u_0$ by submitting the negative of the gradient, and adding the right-hand side contribution by submitting the value $f$. We must be sure to use read_dof_values_plain() for evaluating $u_0$ as read_dof_values() would set all Dirichlet values to zero.

Finally, the system_rhs vector is of type LA::MPI::Vector, but the MatrixFree class only works with LinearAlgebra::distributed::Vector. Therefore we must compute the right-hand side using MatrixFree functionality and then use the functions in the ChangeVectorType namespace to copy it to the correct type.
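A sketch of the integration loop just described (assuming a MatrixFree object matrix_free and LinearAlgebra::distributed::Vector<double> vectors u0 and rhs; this simplifies the function below by using a unit coefficient):

  FEEvaluation<dim, degree> phi(matrix_free);
  for (unsigned int cell = 0; cell < matrix_free.n_cell_batches(); ++cell)
    {
      phi.reinit(cell);
      phi.read_dof_values_plain(u0); // keeps the Dirichlet values of u0
      phi.evaluate(EvaluationFlags::gradients);
      for (unsigned int q = 0; q < phi.n_q_points; ++q)
        {
          phi.submit_gradient(-phi.get_gradient(q), q);            // -A u0
          phi.submit_value(make_vectorized_array<double>(1.0), q); // f = 1
        }
      phi.integrate_scatter(EvaluationFlags::values |
                              EvaluationFlags::gradients,
                            rhs);
    }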

        template <int dim, int degree>
        void LaplaceProblem<dim, degree>::assemble_rhs()
      @@ -1885,7 +1885,7 @@
    7,168 19 256M 0.16 1.214 0.893 0.521 2.628 2.386 7.260 2.560 12.206 1.844 1.010 1.890 4.744

On the other hand, the algebraic multigrid in the last set of columns is relatively unaffected by the increasing imbalance of the mesh hierarchy (because it doesn't use the mesh hierarchy) and the growth in time is rather driven by other factors that are well documented in the literature (most notably that the algorithmic complexity of some parts of algebraic multigrid methods appears to be ${\cal O}(N \log N)$ instead of ${\cal O}(N)$ for geometric multigrid).

The upshot of the table above is that the matrix-free geometric multigrid method appears to be the fastest approach to solving this equation, if not by a huge margin. Matrix-based methods, on the other hand, are consistently the worst.

The following figure provides strong scaling results for each method, i.e., we solve the same problem on more and more processors. Specifically, we consider the problems after 16 mesh refinement cycles (32M DoFs) and 19 cycles (256M DoFs), on between 56 and 28,672 processors:

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_51.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_51.html 2024-12-27 18:25:21.584961492 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_51.html 2024-12-27 18:25:21.592961547 +0000 @@ -170,7 +170,7 @@

    Introduction

This tutorial program presents the implementation of a hybridizable discontinuous Galerkin method for the convection-diffusion equation.

    Hybridizable discontinuous Galerkin methods

One common argument against the use of discontinuous Galerkin elements is the large number of globally coupled degrees of freedom that one must solve for in an implicit system. This is because, unlike continuous finite elements, in typical discontinuous elements there is one degree of freedom at each vertex for each of the adjacent elements, rather than just one, and similarly for edges and faces. As an example of how fast the number of unknowns grows, consider the FE_DGPMonomial basis: each scalar solution component is represented by polynomials of degree $p$ with $(1/\text{dim}!) \prod_{i=1}^{\text{dim}}(p+i)$ degrees of freedom per element. Typically, all degrees of freedom in an element are coupled to all of the degrees of freedom in the adjacent elements. The resulting discrete equations yield very large linear systems very quickly, especially for systems of equations in 2 or 3 dimensions.

    Reducing the size of the linear system

    To alleviate the computational cost of solving such large linear systems, the hybridizable discontinuous Galerkin (HDG) methodology was introduced by Cockburn and co-workers (see the references in the recent HDG overview article by Nguyen and Peraire [Ngu2012]).

    The HDG method achieves this goal by formulating the mathematical problem using Dirichlet-to-Neumann mappings. The partial differential equations are first written as a first order system, and each field is then discretized via a DG method. At this point, the single-valued "trace" values on the skeleton of the mesh, i.e., element faces, are taken to be independent unknown quantities. This yields unknowns in the discrete formulation that fall into two categories:

@@ -199,7 +199,7 @@ A U &=& F - B \Lambda. \end{eqnarray*}

The point is that the presence of $A^{-1}$ is not a problem because $A$ is a block diagonal matrix where each block corresponds to one cell and is therefore easy enough to invert. The coupling to other cells is introduced by the matrices $B$ and $C$ over the skeleton variable. The block-diagonality of $A$ and the structure in $B$ and $C$ allow us to invert the matrix $A$ element by element (the local solution of the Dirichlet problem) and subtract $CA^{-1}B$ from $D$. The steps in the Dirichlet-to-Neumann map concept hence correspond to

        1. constructing the Schur complement matrix $D-C A^{-1} B$ and right hand side $G - C A^{-1} F$ locally on each cell and inserting the contribution into the global trace matrix in the usual way,
        2. @@ -214,7 +214,7 @@

          HDG applied to the convection-diffusion problem

          The HDG formulation used for this example is taken from
          N.C. Nguyen, J. Peraire, B. Cockburn: An implicit high-order hybridizable discontinuous Galerkin method for linear convection–diffusion equations, Journal of Computational Physics, 2009, 228:9, 3232-3254. [DOI]

We consider the convection-diffusion equation over the domain $\Omega$ with Dirichlet boundary $\partial \Omega_D$ and Neumann boundary $\partial \Omega_N$:

          \begin{eqnarray*}
         \nabla \cdot (\mathbf{c} u) - \nabla \cdot (\kappa \nabla u) &=& f,
         \quad \text{ in } \Omega, \\
@@ -232,7 +232,7 @@
         \quad \text{ on }  \partial \Omega_N.
 \end{eqnarray*}

We multiply these equations by the weight functions $\mathbf{v}, w$ and integrate by parts over every element $K$ to obtain:

          \begin{eqnarray*}
   (\mathbf{v}, \kappa^{-1} \mathbf{q})_K - (\nabla\cdot\mathbf{v}, u)_K
     + \left<\mathbf{v}\cdot\mathbf{n}, {\hat{u}}\right>_{\partial K} &=& 0, \\
@@ -247,8 +247,8 @@
   + \tau(u - \hat{u})\mathbf{n} \quad \text{ on } \partial K.
 \end{eqnarray*}

The variable $\hat {u}$ is introduced as an additional independent variable and is the one for which we finally set up a globally coupled linear system. As mentioned above, it is defined on the element faces and discontinuous from one face to another wherever faces meet (at vertices in 2d, and at edges and vertices in 3d). Values for $u$ and $\mathbf{q}$ appearing in the numerical trace function are taken to be the cell's interior solution restricted to the boundary $\partial K$.

The local stabilization parameter $\tau$ has effects on stability and accuracy of HDG solutions; see the literature for a further discussion. A stabilization parameter of unity is reported to be the choice which gives best results. A stabilization parameter $\tau$ that tends to infinity prohibits jumps in the solution over the element boundaries, making the HDG solution approach the approximation with continuous finite elements. In the program below, we choose the stabilization parameter as

          \begin{eqnarray*}
   \tau = \frac{\kappa}{\ell} + |\mathbf{c} \cdot \mathbf{n}|
\end{eqnarray*} @@ -259,8 +259,8 @@ \hat{u}|_{\partial \Omega_D} = g_D, \end{equation*}

where the equal sign actually means an $L_2$ projection of the boundary function $g$ onto the space of the face variables (e.g. linear functions on the faces). This constraint is then applied to the skeleton variable $\hat{u}$ using inhomogeneous constraints by the method VectorTools::project_boundary_values.

Summing the elemental contributions across all elements in the triangulation, enforcing the normal component of the numerical flux, and integrating by parts on the equation weighted by $w$, we arrive at the final form of the problem: Find $(\mathbf{q}_h, u_h, \hat{u}_h) \in
 \mathcal{V}_h^p \times \mathcal{W}_h^p \times \mathcal{M}_h^p$ such that

          \begin{align*}
   (\mathbf{v}, \kappa^{-1} \mathbf{q}_h)_{\mathcal{T}}
@@ -289,19 +289,19 @@
We use the notation $(\cdot, \cdot)_{\mathcal{T}} = \sum_K (\cdot, \cdot)_K$ to denote the sum of integrals over all cells and $\left<\cdot, \cdot\right>_{\partial \mathcal{T}} = \sum_K \left<\cdot, \cdot\right>_{\partial K}$ to denote integration over all faces of all cells, i.e., interior faces are visited twice, once from each side and with the corresponding normal vectors. When combining the contribution from both elements sharing a face, the above equation yields terms familiar from the DG method, with jumps of the solution over the cell boundaries.

In the equation above, the space $\mathcal {W}_h^{p}$ for the scalar variable $u_h$ is defined as the space of functions that are tensor product polynomials of degree $p$ on each cell and discontinuous over the element boundaries $\mathcal Q_{-p}$, i.e., the space described by FE_DGQ<dim>(p). The space for the gradient or flux variable $\mathbf{q}_h$ is a vector element space where each component is locally a polynomial and discontinuous, again $\mathcal Q_{-p}$. In the code below, we collect these two local parts together in one FESystem where the first dim components denote the gradient part and the last scalar component corresponds to the scalar variable. For the skeleton component $\hat{u}_h$, we define a space that consists of discontinuous tensor product polynomials that live on the element faces, which in deal.II is implemented by the class FE_FaceQ. This space is otherwise similar to FE_DGQ, i.e., the solution function is not continuous between two neighboring faces; see also the results section below for an illustration.

          In the weak form given above, we can note the following coupling patterns:

1. The matrix $A$ consists of local-local coupling terms. These arise when the local weighting functions $(\mathbf{v}, w)$ multiply the local solution terms $(\mathbf{q}_h, u_h)$. Because the elements are discontinuous, $A$ is block diagonal.
2. The matrix $B$ represents the local-face coupling. These are the terms with weighting functions $(\mathbf{v}, w)$ multiplying the skeleton variable $\hat{u}_h$.
3. The matrix $C$ represents the face-local coupling, which involves the weighting function $\mu$ multiplying the local solutions $(\mathbf{q}_h, u_h)$.
4. The matrix $D$ is the face-face coupling; terms involve both $\mu$ and $\hat{u}_h$.

          Post-processing and super-convergence

One special feature of the HDG methods is that they typically allow for constructing an enriched solution that gains accuracy. This post-processing takes the HDG solution in an element-by-element fashion and combines it such that one can get $\mathcal O(h^{p+2})$ order of accuracy when using polynomials of degree $p$. For this to happen, there are two necessary ingredients:

            1. The computed solution gradient $\mathbf{q}_h$ converges at optimal rate, i.e., $\mathcal{O}(h^{p+1})$.
            2. @@ -319,7 +319,7 @@

              Since we test by the whole set of basis functions in the space of tensor product polynomials of degree $p+1$ in the second set of equations, this is an overdetermined system with one more equation than unknowns. We fix this in the code below by omitting one of these equations (since the rows in the Laplacian are linearly dependent when representing a constant function). As we will see below, this form of the post-processing gives the desired super-convergence result with rate $\mathcal {O}(h^{p+2})$. It should be noted that there is some freedom in constructing $u_h^*$ and this minimization approach to extract the information from the gradient is not the only one. In particular, the post-processed solution defined here does not satisfy the convection-diffusion equation in any sense. As an alternative, the paper by Nguyen, Peraire and Cockburn cited above suggests another somewhat more involved formula for convection-diffusion that can also post-process the flux variable into an $H(\Omega,\mathrm{div})$-conforming variant and better represents the local convection-diffusion operator when the diffusion is small. We leave the implementation of a more sophisticated post-processing as a possible extension to the interested reader.
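For reference, the local problem sketched in this paragraph is commonly stated as follows: on each cell $K$, find $u_h^* \in \mathcal Q_{p+1}(K)$ such that (a sketch of the standard formulation; the program's own postprocessing code is the authoritative variant)

\begin{align*}
  \left(\nabla w_h, \nabla u_h^*\right)_K &= -\left(\nabla w_h, \kappa^{-1}\mathbf{q}_h\right)_K
  \quad \text{for all } w_h \in \mathcal Q_{p+1}(K), \\
  \left(1, u_h^*\right)_K &= \left(1, u_h\right)_K.
\end{align*}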

              Note that for vector-valued problems, the post-processing works similarly. One simply sets the constraint for the mean value of each vector component separately and uses the gradient as the main source of information.

              Problem specific data

For this tutorial program, we consider almost the same test case as in step-7. The computational domain is $\Omega \dealcoloneq [-1,1]^d$ and the exact solution corresponds to the one in step-7, except for a scaling. We use the following source centers $x_i$ for the exponentials

The 'local' solutions are interior to each element. These represent the primal solution field $u$ as well as the auxiliary field $\mathbf{q}$.

      const FESystem<dim> fe_local;
      DoFHandler<dim> dof_handler_local;
      Vector<double> solution_local;
    @@ -1689,7 +1689,7 @@
    2304 18816 2.956e-05 3.96 2.104e-04 3.97 5.750e-07 5.01
    4096 33280 9.428e-06 3.97 6.697e-05 3.98 1.362e-07 5.01
    9216 74496 1.876e-06 3.98 1.330e-05 3.99 1.788e-08 5.01
One can see the error reduction upon grid refinement, and for the cases where global refinement was performed, also the convergence rates. The quadratic convergence rates of Q1 elements in the $L_2$ norm for both the scalar variable and the gradient variable are apparent, as is the cubic rate for the postprocessed scalar variable in the $L_2$ norm. Note this distinctive feature of an HDG solution. In typical continuous finite elements, the gradient of the solution of order $p$ converges at rate $p$ only, as opposed to $p+1$ for the actual solution. Even though superconvergence results for finite elements are also available (e.g. superconvergent patch recovery first introduced by Zienkiewicz and Zhu), these are typically limited to structured meshes and other special cases. For Q3 HDG variables, the scalar variable and gradient converge at fourth order and the postprocessed scalar variable at fifth order.

    The same convergence rates are observed in 3d.

    Q1 elements, adaptive refinement:
    cells dofs val L2 grad L2 val L2-post
    8 144 7.122e+00 1.941e+01 6.102e+00
    @@ -1730,7 +1730,7 @@
    110592 5419008 3.482e-05 3.94 3.055e-04 3.95 7.374e-07 5.00

    Comparison with continuous finite elements

    Results for 2D

The convergence tables verify the expected convergence rates stated in the introduction. Now, we want to show a quick comparison of the computational efficiency of the HDG method compared to a usual finite element (continuous Galerkin) method on the problem of this tutorial. Of course, stability aspects of the HDG method compared to continuous finite elements for transport-dominated problems are also important in practice, which is an aspect not seen on a problem with smooth analytic solution. In the picture below, we compare the $L_2$ error as a function of the number of degrees of freedom (left) and of the computing time spent in the linear solver (right) for two space dimensions of continuous finite elements (CG) and the hybridized discontinuous Galerkin method presented in this tutorial. As opposed to the tutorial where we only use unpreconditioned BiCGStab, the times shown in the figures below use the Trilinos algebraic multigrid preconditioner in TrilinosWrappers::PreconditionAMG. For the HDG part, a wrapper around ChunkSparseMatrix for the trace variable has been used in order to utilize the block structure in the matrix on the finest level.

    @@ -1747,7 +1747,7 @@

The results are in line with properties of DG methods in general: Best performance is typically not achieved for linear elements, but rather at somewhat higher order, usually around $p=3$. This is because of a volume-to-surface effect for discontinuous solutions, with too much of the solution living on the surfaces and hence duplicating work when the elements are linear. Put another way, DG methods are often most efficient when used at relatively high order, despite their focus on a discontinuous (and hence, seemingly less accurate) representation of solutions.

    Results for 3D

We now show the same figures in 3D: The first row shows the number of degrees of freedom and computing time versus the $L_2$ error in the scalar variable $u$ for CG and HDG at order $p$, the second row shows the post-processed HDG solution instead of the original one, and the third row compares the post-processed HDG solution with CG at order $p+1$. In 3D, the volume-to-surface effect makes the cost of HDG somewhat higher and the CG solution is clearly better than HDG for linears by any metric. For cubics, HDG and CG are of similar quality, whereas HDG is again more efficient for sixth order polynomials. One can alternatively also use the combination of FE_DGP and FE_FaceP instead of (FE_DGQ, FE_FaceQ), which do not use tensor product polynomials of degree $p$ but Legendre polynomials of complete degree $p$. There are fewer degrees of freedom on the skeleton variable for FE_FaceP for a given mesh size, but the solution quality (error vs. number of DoFs) is very similar to the results for FE_FaceQ.

/usr/share/doc/packages/dealii/doxygen/deal.II/step_52.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_52.html 2024-12-27 18:25:21.648961932 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_52.html 2024-12-27 18:25:21.656961987 +0000 @@ -159,20 +159,20 @@ - \Sigma_a(x) \phi(x,t) + S(x,t) \end{eqnarray*}

augmented by appropriate boundary conditions. Here, $v$ is the velocity of neutrons (for simplicity we assume it is equal to 1 which can be achieved by simply scaling the time variable), $D$ is the diffusion coefficient, $\Sigma_a$ is the absorption cross section, and $S$ is a source. Because we are only interested in the time dependence, we assume that $D$ and $\Sigma_a$ are constant.

    Since this program only intends to demonstrate how to use advanced time stepping algorithms, we will only look for the solutions of relatively simple problems. Specifically, we are looking for a solution on a square domain $[0,b]\times[0,b]$ of the form

    \begin{eqnarray*}
 \phi(x,t) = A\sin(\omega t)(bx-x^2).
 \end{eqnarray*}

    By using quadratic finite elements, we can represent this function exactly at any particular time, and all the error will be due to the time discretization. We do this because it is then easy to observe the order of convergence of the various time stepping schemes we will consider, without having to separate spatial and temporal errors.

We impose the following boundary conditions: homogeneous Dirichlet for $x=0$ and $x=b$ and homogeneous Neumann conditions for $y=0$ and $y=b$. We choose the source term so that the corresponding solution is in fact of the form stated above:

    \begin{eqnarray*}
 S=A\left(\frac{1}{v}\omega \cos(\omega t)(bx -x^2) + \sin(\omega t)
 \left(\Sigma_a (bx-x^2)+2D\right) \right).
 \end{eqnarray*}

Because the solution is a sine in time, we know that the exact solution satisfies $\phi\left(x,\frac{\pi}{\omega}\right) = 0$. Therefore, the error at time $t=\frac{\pi}{\omega}$ is simply the norm of the numerical solution, i.e., $\|e(\cdot,t=\frac{\pi}{\omega})\|_{L_2} = \|\phi_h(\cdot,t=\frac{\pi}{\omega})\|_{L_2}$, and is particularly easily evaluated. In the code, we evaluate the $l_2$ norm of the vector of nodal values of $\phi_h$ instead of the $L_2$ norm of the associated spatial function, since the former is simpler to compute; however, on uniform meshes, the two are just related by a constant and we can consequently observe the temporal convergence order with either.

    Runge-Kutta methods

    The Runge-Kutta methods implemented in deal.II assume that the equation to be solved can be written as:

    \begin{eqnarray*}
@@ -189,12 +189,12 @@
   \frac{\partial u(x,t)}{\partial t} = q(t,u(x,t))
 \end{eqnarray*}

by test functions, integrating over $\Omega$, substituting $u\rightarrow u_h$ and restricting the test functions to the $\varphi_i(x)$ from above, then this spatially discretized equation has the form

    \begin{eqnarray*}
 M\frac{dU}{dt} = f(t,U),
 \end{eqnarray*}

where $M$ is the mass matrix and $f(t,U)$ is the spatially discretized version of $q(t,u(x,t))$ (where $q$ is typically the place where spatial derivatives appear, but this is not of much concern for the moment given that we only consider time derivatives). In other words, this form fits the general scheme above if we write

    \begin{eqnarray*}
 \frac{dy}{dt} = g(t,y) = M^{-1}f(t,y).
\end{eqnarray*} @@ -210,7 +210,7 @@ k_i = \Delta t \, M^{-1} f\left(t_n+c_i h, y_n+\sum_{j=1}^s a_{ij}k_j\right). \end{eqnarray*}

Here $a_{ij}$, $b_i$, and $c_i$ are known coefficients that identify which particular Runge-Kutta scheme you want to use, and $\Delta t=t_{n+1}-t_n$ is the time step used. Different time stepping methods of the Runge-Kutta class differ in the number of stages $s$ and the values they use for the coefficients $a_{ij}$, $b_i$, and $c_i$ but are otherwise easy to implement since one can look up tabulated values for these coefficients. (These tables are often called Butcher tableaus.)
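For example, the classical fourth order Runge-Kutta method (RK4, mentioned below) is identified by the following tableau, with the $c_i$ in the left column, the $a_{ij}$ in the body, and the $b_i$ in the bottom row:

\begin{align*}
 \begin{array}{c|cccc}
  0            &              &              &              & \\
  \tfrac{1}{2} & \tfrac{1}{2} &              &              & \\
  \tfrac{1}{2} & 0            & \tfrac{1}{2} &              & \\
  1            & 0            & 0            & 1            & \\ \hline
               & \tfrac{1}{6} & \tfrac{1}{3} & \tfrac{1}{3} & \tfrac{1}{6}
 \end{array}
\end{align*}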

    At the time of the writing of this tutorial, the methods implemented in deal.II can be divided in three categories:

    1. Explicit Runge-Kutta; in order for a method to be explicit, it is necessary that in the formula above defining $k_i$, $k_i$ does not appear on the right hand side. In other words, these methods have to satisfy $a_{ii}=0, i=1,\ldots,s$.
    2. @@ -221,14 +221,14 @@

    Many well known time stepping schemes that one does not typically associate with the names Runge or Kutta can in fact be written in a way so that they, too, can be expressed in these categories. They oftentimes represent the lowest-order members of these families; one example is the simple explicit Euler method.

    Explicit Runge-Kutta methods

These methods only require a function to evaluate $M^{-1}f(t,y)$ but not (as implicit methods do) to solve an equation that involves $f(t,y)$ for $y$. Like all explicit time stepping methods, they become unstable when the chosen time step is too large.

    Well known methods in this class include forward Euler, third order Runge-Kutta, and fourth order Runge-Kutta (often abbreviated as RK4).

    Embedded Runge-Kutta methods

These methods use both a lower and a higher order method to estimate the error and decide if the time step needs to be shortened or can be increased. The term "embedded" refers to the fact that the lower-order method does not require additional evaluations of the function $M^{-1}f(\cdot,\cdot)$ but reuses data that has to be computed for the higher order method anyway. It is, in other words, essentially free, and we get the error estimate as a side product of using the higher order method.

    This class of methods include Heun-Euler, Bogacki-Shampine, Dormand-Prince (ode45 in Matlab and often abbreviated as RK45 to indicate that the lower and higher order methods used here are 4th and 5th order Runge-Kutta methods, respectively), Fehlberg, and Cash-Karp.

    At the time of the writing, only embedded explicit methods have been implemented.

    Implicit Runge-Kutta methods

Implicit methods require the solution of (possibly nonlinear) systems of the form $\alpha y = f(t,y)$ for $y$ in each (sub-)timestep. Internally, this is done using a Newton-type method and, consequently, they require that the user provide functions that can evaluate $M^{-1}f(t,y)$ and $\left(I-\tau M^{-1} \frac{\partial f}{\partial y}\right)^{-1}$ or equivalently $\left(M - \tau \frac{\partial f}{\partial y}\right)^{-1} M$.

    The particular form of this operator results from the fact that each Newton step requires the solution of an equation of the form

    \begin{align*}
   \left(M - \tau \frac{\partial f}{\partial y}\right) \Delta y
@@ -237,9 +237,9 @@
for some (given) $h(t,y)$. Implicit methods are always stable, regardless of the time step size, but too large time steps of course affect the accuracy of the solution, even if the numerical solution remains stable and bounded.

Methods in this class include backward Euler, implicit midpoint, Crank-Nicolson, and the two stage SDIRK method (short for "singly diagonally implicit Runge-Kutta", a term coined to indicate that the diagonal elements $a_{ii}$ defining the time stepping method are all equal; this property allows for the Newton matrix $I-\tau M^{-1}\frac{\partial f}{\partial y}$ to be re-used between stages because $\tau$ is the same every time).

    Spatially discrete formulation

By expanding the solution of our model problem as always using shape functions $\psi_j$ and writing

    \begin{eqnarray*}
 \phi_h(x,t) = \sum_j U_j(t) \psi_j(x),
 \end{eqnarray*} @@ -315,8 +315,8 @@

     

    The Diffusion class

The next piece is the declaration of the main class. Most of the functions in this class are not new and have been explained in previous tutorials. The only interesting functions are evaluate_diffusion() and id_minus_tau_J_inverse(). evaluate_diffusion() evaluates the diffusion equation, $M^{-1}(f(t,y))$, at a given time and a given $y$. id_minus_tau_J_inverse() evaluates $\left(I-\tau M^{-1} \frac{\partial f(t,y)}{\partial y}\right)^{-1}$ or equivalently $\left(M-\tau \frac{\partial f}{\partial y}\right)^{-1} M$ at a given time, for a given $\tau$ and $y$. This function is needed when an implicit method is used.

      class Diffusion
      {
      public:
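      // Sketch of the two declarations discussed above; the signatures are
      // inferred from the surrounding text and the definition shown below,
      // not copied verbatim from this excerpt:
      Vector<double> evaluate_diffusion(const double          time,
                                        const Vector<double> &y) const;
      Vector<double> id_minus_tau_J_inverse(const double          time,
                                            const double          tau,
                                            const Vector<double> &y) const;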

    Diffusion::evaluate_diffusion

    Next, we evaluate the weak form of the diffusion equation at a given time $t$ and for a given vector $y$. In other words, as outlined in the introduction, we evaluate $M^{-1}(-{\cal D}y - {\cal A}y + {\cal S})$. For this, we have to apply the matrix $-{\cal D} - {\cal A}$ (previously computed and stored in the variable system_matrix) to $y$ and then add the source term which we integrate as we usually do. (Integrating up the solution could be done using VectorTools::create_right_hand_side() if you wanted to save a few lines of code, or wanted to take advantage of doing the integration in parallel.) The result is then multiplied by $M^{-1}$.

      Vector<double> Diffusion::evaluate_diffusion(const double time,
      const Vector<double> &y) const
      {
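      // Sketch of the body described above; the member names
      // (system_matrix, inverse_mass_matrix, dof_handler) are assumptions
      // for illustration, not identifiers taken from this excerpt:
      //
      //   Vector<double> tmp(dof_handler.n_dofs());
      //   system_matrix.vmult(tmp, y);           // tmp = (-D - A) y
      //   /* integrate the source term at 'time' and add it to tmp */
      //   Vector<double> value(dof_handler.n_dofs());
      //   inverse_mass_matrix.vmult(value, tmp); // multiply by M^{-1}
      //   return value;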
     
      assemble_system();
     
    Finally, we solve the diffusion problem using several of the Runge-Kutta methods implemented in namespace TimeStepping, each time outputting the error at the end time. (As explained in the introduction, since the exact solution is zero at the final time, the error equals the numerical solution and can be computed by just taking the $l_2$ norm of the solution vector.)

      unsigned int n_steps = 0;
      const unsigned int n_time_steps = 200;
      const double initial_time = 0.;
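      // Sketch of one of these runs (an explicit method; every name not
      // shown above, i.e. final_time, solution, evaluate_diffusion, is an
      // assumption for illustration):
      //
      //   const double final_time = 10.;
      //   const double dt = (final_time - initial_time) / n_time_steps;
      //   TimeStepping::ExplicitRungeKutta<Vector<double>> rk(
      //     TimeStepping::RK_CLASSIC_FOURTH_ORDER);
      //   double time = initial_time;
      //   for (; n_steps < n_time_steps; ++n_steps)
      //     time = rk.evolve_one_time_step(
      //       [this](const double t, const Vector<double> &y)
      //       { return this->evaluate_diffusion(t, y); },
      //       time, dt, solution);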
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html differs (HTML document, UTF-8 Unicode text, with very long lines)
    --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html 2024-12-27 18:25:21.708962344 +0000
    +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html 2024-12-27 18:25:21.712962371 +0000

    To illustrate how one describes geometries using charts in deal.II, we will consider a case that originates in an application of the ASPECT mantle convection code, using a data set provided by D. Sarah Stamps. In the concrete application, we were interested in describing flow in the Earth mantle under the East African Rift, a zone where two continental plates drift apart. Not to beat around the bush, the geometry we want to describe looks like this:

    In particular, though you cannot see this here, the top surface is not just colored by the elevation but is, in fact, deformed to follow the correct topography. While the actual application is not relevant here, the geometry is. The domain we are interested in is a part of the Earth that ranges from the surface to a depth of 500km, from 26 to 35 degrees East of the Greenwich meridian, and from 5 degrees North of the equator to 10 degrees South.

    This description of the geometry suggests to start with a box $\hat U=[26,35]\times[-10,5]\times[-500000,0]$ (measured in degrees, degrees, and meters) and to provide a map $\varphi$ so that $\varphi^{-1}(\hat U)=\Omega$ where $\Omega$ is the domain we seek. $(\Omega,\varphi)$ is then a chart, $\varphi$ the pull-back operator, and $\varphi^{-1}$ the push-forward operator. If we need a point $q$ that is the "average" of other points $q_i\in\Omega$, the ChartManifold class then first applies the pull-back to obtain $\hat q_i=\varphi(q_i)$, averages these to a point $\hat p$ and then computes $p=\varphi^{-1}(\hat p)$.

    Our goal here is therefore to implement a class that describes $\varphi$ and $\varphi^{-1}$. If Earth was a sphere, then this would not be difficult: if we denote by $(\hat \phi,\hat \theta,\hat d)$ the points of $\hat U$ (i.e., longitude counted eastward, latitude counted northward, and elevation relative to zero depth), then

    \[
   \mathbf x = \varphi^{-1}(\hat \phi,\hat \theta,\hat d)
   = (R+\hat d) (\cos\hat \phi\cos\hat \theta, \sin\hat \phi\cos\hat \theta, \sin\hat \theta)^T
 \]

    provides coordinates in a Cartesian coordinate system, where $R$ is the radius of the sphere. However, the Earth is not a sphere:

    1. It is flattened at the poles and larger at the equator: the semi-major axis is approximately 22km longer than the semi-minor axis. We will account for this using the WGS 84 reference standard for the Earth shape. The formula used in WGS 84 to obtain a position in Cartesian coordinates from longitude, latitude, and elevation is

\[
  \mathbf x = \left(\begin{array}{c}
    \left(\bar R(\hat\theta)+\hat d\right) \cos\hat\phi \cos\hat\theta \\
    \left(\bar R(\hat\theta)+\hat d\right) \sin\hat\phi \cos\hat\theta \\
    \left((1-e^2)\bar R(\hat\theta)+\hat d\right) \sin\hat\theta
  \end{array}\right),
  \qquad
  \bar R(\hat\theta) = \frac{R}{\sqrt{1-(e\sin\hat\theta)^2}},
\]

      with semi-major radius $R=6378137$ m and ellipticity $e$ given by $e^2\approx 0.00669438$, the values specified by WGS 84.

      Computing $\varphi_\text{WGS84}(\mathbf x)$ is also possible though a lot more awkward. We won't show the formula here but instead only provide the implementation in the program.

      Implementation

      There are a number of issues we need to address in the program. At the largest scale, we need to write a class that implements the interface of ChartManifold. This involves a function push_forward() that takes a point in the reference domain $\hat U$ and transforms it into real space using the function $\varphi^{-1}$ outlined above, and its inverse function pull_back() implementing $\varphi$. We will do so in the AfricaGeometry class below that looks, in essence, like this:

      class AfricaGeometry : public ChartManifold<3,3>
      {
      public:
        virtual Point<3> pull_back(const Point<3> &space_point) const override;
        virtual Point<3> push_forward(const Point<3> &chart_point) const override;
      };
      -11.983333 35.966667 687
      -11.983333 35.983333 659

      The data is formatted as latitude longitude elevation where the first two columns are provided in degrees North of the equator and degrees East of the Greenwich meridian. The final column is given in meters above the WGS 84 zero elevation.

      In the transformation functions, we need to evaluate $h(\hat\phi,\hat\theta)$ for a given longitude $\hat\phi$ and latitude $\hat\theta$. In general, this data point will not be available and we will have to interpolate between adjacent data points. Writing such an interpolation routine is not particularly difficult, but it is a bit tedious and error prone. Fortunately, we can somehow shoehorn this data set into an existing class: Functions::InterpolatedUniformGridData . Unfortunately, the class does not fit the bill quite exactly and so we need to work around it a bit. The problem comes from the way we initialize this class: in its simplest form, it takes a stream of values that it assumes form an equispaced mesh in the $x-y$ plane (or, here, the $\phi-\theta$ plane). Which is what they do here, sort of: they are ordered latitude first, longitude second; and more awkwardly, the first column starts at the largest values and counts down, rather than the usual other way around.
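      A sketch of how such a data set can be handed to this class, assuming the samples have already been read into a Table and reordered as just described; only the $1139\times 660$ sample count is taken from the text, the endpoint values below are placeholders:

      #include <deal.II/base/function_lib.h>
      #include <deal.II/base/table.h>

      using namespace dealii;

      void query_topography(const Table<2, double> &elevations) // 1139 x 660 samples
      {
        // Extents of the (latitude, longitude) box and the number of
        // subintervals (number of samples minus one) in each direction; the
        // endpoints are placeholder values, not the tutorial's.
        const std::array<std::pair<double, double>, 2> endpoints = {
          {{-12.0, 7.0}, {25.0, 36.0}}};
        const std::array<unsigned int, 2> n_subintervals = {{1138, 659}};

        const Functions::InterpolatedUniformGridData<2> topography(
          endpoints, n_subintervals, elevations);

        // Evaluate the interpolated elevation h(phi, theta) at one point:
        const double h = topography.value(Point<2>(-11.98, 35.97));
        (void)h;
      }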

      Now, while tutorial programs are meant to illustrate how to code with deal.II, they do not necessarily have to satisfy the same quality standards as one would have to do with production codes. In a production code, we would write a function that reads the data and (i) automatically determines the extents of the first and second column, (ii) automatically determines the number of data points in each direction, (iii) does the interpolation regardless of the order in which data is arranged, if necessary by switching the order between reading and presenting it to the Functions::InterpolatedUniformGridData class.

      On the other hand, tutorial programs are best if they are short and demonstrate key points rather than dwell on unimportant aspects and, thereby, obscure what we really want to show. Consequently, we will allow ourselves a bit of leeway:

      • since this program is intended solely for a particular geometry around the area of the East-African rift and since this is precisely the area described by the data file, we will hardcode in the program that there are $1139\times 660$ pieces of data;
        • Does it matter? It is almost certainly true that this depends on the equation you are solving. For example, it is known that solving the Euler equations of gas dynamics on complex geometries requires highly accurate boundary descriptions to ensure convergence of quantities that measure the flow close to the boundary. On the other hand, equations with elliptic components (e.g., the Laplace or Stokes equations) are typically rather forgiving of these issues: one does quadrature anyway to approximate integrals, and further approximating the geometry may not do as much harm as one could fear given that the volume of the overlaps or gaps at every hanging node is only ${\cal O}(h^d)$ even with a linear mapping and ${\cal O}(h^{d+p-1})$ for a mapping of degree $p$. (You can see this by considering that in 2d the gap/overlap is a triangle with base $h$ and height ${\cal O}(h)$; in 3d, it is a pyramid-like structure with base area $h^2$ and height ${\cal O}(h)$. Similar considerations apply for higher order mappings where the height of the gaps/overlaps is ${\cal O}(h^p)$.) In other words, if you use a linear mapping with linear elements, the error in the volume you integrate over is already at the same level as the integration error using the usual Gauss quadrature. Of course, for higher order elements one would have to choose matching mapping objects.

          Another point of view on why it is probably not worth worrying too much about the issue is that there is certainly no narrative in the community of numerical analysts that these issues are a major concern one needs to watch out for when using complex geometries. If it does not seem to be discussed often among practitioners, if ever at all, then it is at least not something people have identified as a common problem.

          This issue is not dissimilar to having hanging nodes at curved boundaries where the geometry description of the boundary typically pulls a hanging node onto the boundary whereas the large edge remains straight, making the adjacent small and large cells not match each other. Although this behavior existed in deal.II since its beginning, 15 years before manifold descriptions became available, it did not ever come up in mailing list discussions or conversations with colleagues.

          /usr/share/doc/packages/dealii/doxygen/deal.II/step_55.html differs (HTML document, UTF-8 Unicode text, with very long lines)
          --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_55.html 2024-12-27 18:25:21.776962810 +0000
          +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_55.html 2024-12-27 18:25:21.780962838 +0000
        • You can implement various other tasks for parallel programs: error computation, writing graphical output, etc.
        • You can visualize vector fields, stream lines, and contours of vector quantities.
        We are solving for a velocity $\textbf{u}$ and pressure $p$ that satisfy the Stokes equation, which reads

        \begin{eqnarray*}
   - \triangle \textbf{u} + \nabla p &=& \textbf{f}, \\
   -\textrm{div}\; \textbf{u} &=& 0.
 \end{eqnarray*}

        where $S=-BA^{-1} B^T$ is the Schur complement.

        With this choice of $P$, assuming that we handle $A^{-1}$ and $S^{-1}$ exactly (which is an "idealized" situation), the preconditioned linear system has three distinct eigenvalues independent of $h$ and is therefore "optimal". See section 6.2.1 (especially p. 292) in [elman2005]. For comparison, using the ideal version of the upper block-triangular preconditioner in step-22 (also used in step-56) would have all eigenvalues be equal to one.

        We will use approximations of the inverse operations in $P^{-1}$ that are (nearly) independent of $h$. In this situation, one can again show that the eigenvalues are independent of $h$. For the Krylov method we choose MINRES, which is attractive for the analysis (the iteration count is proven to be independent of $h$; see the remainder of chapter 6.2.1 in [elman2005]), great from the computational standpoint (simpler and cheaper than GMRES, for example), and applicable (matrix and preconditioner are symmetric).

        For the approximations we will use a CG solve with the mass matrix in the pressure space for approximating the action of $S^{-1}$. Note that the mass matrix is spectrally equivalent to $S$. We can expect the number of CG iterations to be independent of $h$, even with a simple preconditioner like ILU.

        For the approximation of the velocity block $A$ we will perform a single AMG V-cycle. In practice this choice is not exactly independent of $h$, which can explain the slight increase in iteration numbers. A possible explanation is that the coarsest level will be solved exactly and the number of levels and size of the coarsest matrix is not predictable.
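        The $S^{-1}$ approximation described above amounts to only a few lines. This is a sketch under the assumption that a pressure-space mass matrix and an ILU of it are already set up; the names here are illustrative, not the tutorial's:

        #include <deal.II/lac/precondition.h>
        #include <deal.II/lac/solver_cg.h>
        #include <deal.II/lac/sparse_ilu.h>
        #include <deal.II/lac/sparse_matrix.h>
        #include <deal.II/lac/vector.h>

        using namespace dealii;

        // Approximate z = S^{-1} r by a CG solve with the pressure mass
        // matrix, which is spectrally equivalent to S; ILU preconditions CG.
        void apply_schur_complement_inverse(
          const SparseMatrix<double> &pressure_mass_matrix,
          const SparseILU<double>    &ilu,
          const Vector<double>       &r,
          Vector<double>             &z)
        {
          SolverControl            control(1000, 1e-6 * r.l2_norm());
          SolverCG<Vector<double>> cg(control);
          z = 0;
          cg.solve(pressure_mass_matrix, z, r, ilu);
        }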

        The testcase

        We will construct a manufactured solution based on the classical Kovasznay problem, see [kovasznay1948laminar]. Here is an image of the solution colored by the x velocity including streamlines of the velocity:

        We have to cheat here, though, because we are not solving the non-linear Navier-Stokes equations, but the linear Stokes system without convective term. Therefore, to recreate the exact same solution, we use the method of manufactured solutions with the solution of the Kovasznay problem. This will effectively move the convective term into the right-hand side $f$.

        The right-hand side is computed using the script "reference.py" and we use the exact solution for boundary conditions and error computation.

        The commented program

          #include <deal.II/base/quadrature_lib.h>
        /usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html differs (HTML document, UTF-8 Unicode text, with very long lines)
        --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html 2024-12-27 18:25:21.856963360 +0000
        +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html 2024-12-27 18:25:21.856963360 +0000
        Note
        If you use this program as a basis for your own work, please consider citing it in your list of references. The initial version of this work was contributed to the deal.II project by the authors listed in the following citation: 10.5281/zenodo.400995

        Introduction

        Stokes Problem

        The purpose of this tutorial is to create an efficient linear solver for the Stokes equation and compare it to alternative approaches. Here, we will use FGMRES with geometric multigrid as a preconditioner for the velocity block, and we will show in the results section that this is a fundamentally better approach than the linear solvers used in step-22 (including the scheme described in "Possible Extensions"). Fundamentally, this is because only with multigrid is it possible to get $O(n)$ solve time, where $n$ is the number of unknowns of the linear system. Using the Timer class, we collect some statistics to compare setup times, solve times, and number of iterations. We also compute errors to make sure that what we have implemented is correct.

        Let $u \in H_0^1 = \{ u \in H^1(\Omega), u|_{\partial \Omega} = 0 \}$ and $p \in L_*^2 = \{ p \in L^2(\Omega), \int_\Omega p = 0
 \}$. The Stokes equations read as follows in non-dimensionalized form:

        \begin{eqnarray*}
  - \triangle \textbf{u} + \nabla p &=& \textbf{f}, \\
  -\textrm{div}\; \textbf{u} &=& 0.
 \end{eqnarray*}

        Discretization then leads, as usual, to a block linear system

        \begin{eqnarray*}
 \left(\begin{array}{cc} A & B^T \\ B & 0 \end{array}\right)
 \left(\begin{array}{c} U \\ P \end{array}\right)
 =
 \left(\begin{array}{c} F \\ 0 \end{array}\right).
 \end{eqnarray*}

        Our goal is to compare several solution approaches. While step-22 solves the linear system using a "Schur complement approach" in two separate steps, we instead attack the block system at once using FGMRES with an efficient preconditioner, in the spirit of the approach outlined in the "Results" section of step-22. The idea is as follows: if we find a block preconditioner $P$ such that the matrix

        \begin{eqnarray*}
 \left(\begin{array}{cc} A & B^T \\ B & 0 \end{array}\right) P^{-1}
 \end{eqnarray*}

        \begin{eqnarray*}
 \left(\begin{array}{cc} I & 0 \\ 0 & \widetilde{S^{-1}} \end{array}\right).
  \end{eqnarray*}

        Since $P$ is aimed to be a preconditioner only, we shall use the approximations on the right in the equation above.

        As discussed in step-22, $-M_p^{-1}=:\widetilde{S^{-1}} \approx S^{-1}$, where $M_p$ is the pressure mass matrix and is solved approximately by using CG with ILU as a preconditioner, and $\widetilde{A^{-1}}$ is obtained by one of multiple methods: solving a linear system with CG and ILU as preconditioner, just using one application of an ILU, solving a linear system with CG and GMG (Geometric Multigrid as described in step-16) as a preconditioner, or just performing a single V-cycle of GMG.

        For comparison, we also run the direct solver UMFPACK on the whole system. If you want to use a direct solver (like UMFPACK), the system needs to be invertible. To avoid the one dimensional null space given by the constant pressures, we fix the first pressure unknown to zero. This is not necessary for the iterative solvers.
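        In code, pinning that one unknown is a one-liner. A sketch, assuming an AffineConstraints object and the index of the first pressure degree of freedom are at hand (both names are illustrative):

        #include <deal.II/lac/affine_constraints.h>

        using namespace dealii;

        // Constrain the first pressure DoF to zero so the matrix passed to
        // the direct solver is invertible; iterative solvers do not need it.
        void fix_first_pressure_dof(AffineConstraints<double>    &constraints,
                                    const types::global_dof_index first_pressure_dof)
        {
          constraints.add_line(first_pressure_dof); // no entries added, so it is set to zero
        }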

        Reference Solution

        The test problem is a "Manufactured Solution" (see step-7 for details), and we choose $u=(u_1,u_2,u_3)=(2\sin (\pi x), - \pi y \cos(\pi x), - \pi z \cos(\pi x))$, which is divergence free.
         
      Implementation of $f$. See the introduction for more information.

      template <int dim>
      class RightHandSide : public Function<dim>
      {
      }

    Results

    Errors

    We first run the code and confirm that the finite element solution converges with the correct rates as predicted by the error analysis of mixed finite element problems. Given sufficiently smooth exact solutions $u$ and $p$, the errors of the Taylor-Hood element $Q_k \times Q_{k-1}$ should be

    \[
 \| u -u_h \|_0 + h ( \| u- u_h\|_1 + \|p - p_h \|_0)
 \leq C h^{k+1} ( \|u \|_{k+1} + \| p \|_k )
 \]

    see for example Ern/Guermond "Theory and Practice of Finite Elements", Section 4.2.5, p. 195. This is indeed what we observe, using the $Q_2 \times Q_1$ element as an example (this is what is done in the code, but is easily changed in main()):


    As can be seen from the table:

    1. UMFPACK uses large amounts of memory, especially in 3d. Also, UMFPACK timings do not scale favorably with problem size.
    2. Because we are using inner solvers for $A$ and $S$, ILU and GMG require the same number of outer iterations.
    3. The number of (inner) iterations for $A$ increases for ILU with refinement, leading to worse than linear scaling in solve time. In contrast, the number of inner iterations for $A$ stays constant with GMG, leading to nearly perfect scaling in solve time.
    4. GMG needs slightly more memory than ILU to store the level and interface matrices.
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html differs (HTML document, UTF-8 Unicode text, with very long lines)
    --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html 2024-12-27 18:25:21.932963882 +0000
    +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html 2024-12-27 18:25:21.932963882 +0000

    Introduction

    Navier Stokes Equations

    In this tutorial we show how to solve the incompressible Navier Stokes equations (NSE) with Newton's method. The flow we consider here is assumed to be steady. In a domain $\Omega \subset \mathbb{R}^{d}$, $d=2,3$, with a piecewise smooth boundary $\partial \Omega$, and a given force field $\textbf{f}$, we seek a velocity field $\textbf{u}$ and a pressure field $p$ satisfying

    \begin{eqnarray*}
 - \nu \Delta\textbf{u} + (\textbf{u} \cdot \nabla)\textbf{u} + \nabla p &=& \textbf{f}\\
 - \nabla \cdot \textbf{u} &=& 0.
    Now, Newton's iteration can be used to solve for the update terms:

    1. Initialization: Initial guess $u_0$ and $p_0$, tolerance $\tau$;
    2. Linear solve to compute update term $\delta\textbf{u}^{k}$ and $\delta p^k$;

    Finding an Initial Guess

    The initial guess needs to be close enough to the solution for Newton's method to converge; hence, finding a good starting value is crucial to the nonlinear solver.

    When the viscosity $\nu$ is large, a good initial guess can be obtained by solving the Stokes equation with viscosity $\nu$. While problem dependent, this works for $\nu \geq 1/400$ for the test problem considered here.

    However, the convective term $(\mathbf{u}\cdot\nabla)\mathbf{u}$ will be dominant if the viscosity is small, like $1/7500$ in test case 2. In this situation, we use a continuation method to set up a series of auxiliary NSEs with viscosity approaching the one in the target NSE. Correspondingly, we create a sequence $\{\nu_{i}\}$ with $\nu_{n}= \nu$, and accept that the solutions to two NSE with viscosity $\nu_{i}$ and $\nu_{i+1}$ are close if $|\nu_{i} -
 \nu_{i+1}|$ is small. Then we use the solution to the NSE with viscosity $\nu_{i}$ as the initial guess of the NSE with $\nu_{i+1}$. This can be thought of as a staircase from the Stokes equations to the NSE we want to solve.

    That is, we first solve a Stokes problem


    with a parameter $\gamma$ and an invertible matrix $W$. Here $\gamma B^TW^{-1}B$ is the Augmented Lagrangian term; see [Benzi2006] for details.

    Denoting the system matrix of the new system by $G$ and the right-hand side by $b$, we solve it iteratively with right preconditioning $P^{-1}$ as $GP^{-1}y = b$, where

    \begin{eqnarray*}
 P^{-1} =
   \begin{pmatrix}
     \tilde{A} & B^T \\
     0 & \tilde{S}
   \end{pmatrix}^{-1}
 \end{eqnarray*}

    with $\tilde{A} = A + \gamma B^TW^{-1}B$ and $\tilde{S}$ the corresponding Schur complement $\tilde{S} = B \tilde{A}^{-1} B^T$. We let $W = M_p$ where $M_p$ is the pressure mass matrix; then $\tilde{S}^{-1}$ can be approximated by

    \begin{eqnarray*}
 \tilde{S}^{-1} \approx -(\nu+\gamma)M_p^{-1}.
 \end{eqnarray*}

    See [Benzi2006] for details.

    We decompose $P^{-1}$ as

    \begin{eqnarray*}
 P^{-1} =
   \begin{pmatrix}
     \tilde{A}^{-1} & 0 \\
     0 & I
   \end{pmatrix}
   \begin{pmatrix}
     I & -B^T \\
     0 & I
   \end{pmatrix}
   \begin{pmatrix}
     I & 0 \\
     0 & \tilde{S}^{-1}
   \end{pmatrix}.
 \end{eqnarray*}

    Here two inexact solvers will be needed for $\tilde{A}^{-1}$ and $\tilde{S}^{-1}$, respectively (see [Benzi2006]). Since the pressure mass matrix is symmetric and positive definite, CG with ILU as a preconditioner is appropriate to use for $\tilde{S}^{-1}$. For simplicity, we use the direct solver UMFPACK for $\tilde{A}^{-1}$. The last ingredient is a sparse matrix-vector product with $B^T$. Instead of computing the matrix product in the augmented Lagrangian term in $\tilde{A}$, we assemble Grad-Div stabilization $(\nabla \cdot \phi _{i}, \nabla \cdot \phi _{j}) \approx (B^T M_p^{-1}B)_{ij}$, as explained in [HeisterRapin2013].
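    The action of this factorization on a block vector can be written down in a few lines. The following is a sketch, not step-57's actual code; the type and member names (system_matrix, a_inverse, schur_inverse) are assumptions, where the two solver objects stand for the UMFPACK solve with $\tilde{A}$ and the CG/ILU solve approximating $\tilde{S}^{-1}$:

    #include <deal.II/lac/block_sparse_matrix.h>
    #include <deal.II/lac/block_vector.h>

    using namespace dealii;

    template <typename ASolverType, typename SSolverType>
    struct BlockSchurPreconditioner
    {
      const BlockSparseMatrix<double> &system_matrix; // block(0,1) holds B^T
      const ASolverType               &a_inverse;     // action of A~^{-1}
      const SSolverType               &schur_inverse; // action of S~^{-1}

      void vmult(BlockVector<double> &dst, const BlockVector<double> &src) const
      {
        // Rightmost factor: apply S~^{-1} to the pressure component.
        schur_inverse.vmult(dst.block(1), src.block(1));

        // Middle factor: velocity component becomes src_u - B^T * dst_p.
        Vector<double> tmp(src.block(0));
        system_matrix.block(0, 1).vmult(tmp, dst.block(1));
        tmp.sadd(-1.0, src.block(0));

        // Leftmost factor: apply A~^{-1} to the result.
        a_inverse.vmult(dst.block(0), tmp);
      }
    };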

    Test Case

    We use the lid driven cavity flow as our test case; see this page for details. The computational domain is the unit square and the right-hand side is $f=0$. The boundary condition is


    When solving this problem, the error consists of the nonlinear error (from Newton's iteration) and the discretization error (dependent on mesh size). The nonlinear part decreases with each Newton iteration and the discretization error reduces with mesh refinement. In this example, the solution from the coarse mesh is transferred to successively finer meshes and used as an initial guess. Therefore, the nonlinear error is always brought below the tolerance of Newton's iteration and the discretization error is reduced with each mesh refinement.

    Inside the loop, we involve three solvers: one for $\tilde{A}^{-1}$, one for $M_p^{-1}$ and one for $Gx=b$. The first two solvers are invoked in the preconditioner and the outer solver gives us the update term. Overall convergence is controlled by the nonlinear residual; as Newton's method does not require an exact Jacobian, we employ FGMRES with a relative tolerance of only 1e-4 for the outer linear solver. In fact, we use the truncated Newton solve for this system. As described in step-22, the inner linear solves are also not required to be done very accurately. Here we use CG with a relative tolerance of 1e-6 for the pressure mass matrix. As expected, we still see convergence of the nonlinear residual down to 1e-14. Also, we use a simple line search algorithm for globalization of the Newton method.

    The cavity reference values for $\mathrm{Re}=400$ and $\mathrm{Re}=7500$ are from [Ghia1982] and [Erturk2005], respectively, where $\mathrm{Re}$ is the Reynolds number. Here the viscosity is defined by $1/\mathrm{Re}$. Even though we can still find a solution for $\mathrm{Re}=10000$ and the papers cited throughout this introduction contain results for comparison, we limit our discussion here to $\mathrm{Re}=7500$. This is because the solution is no longer stationary starting around $\mathrm{Re}=8000$ but instead becomes periodic, see [Bruneau2006] for details.

    The commented program

    Include files


    If we were asked to assemble the Newton matrix, then we also build a pressure mass matrix in the bottom right block of the matrix. We only need this for the preconditioner, so we need to copy it into a separate matrix object, followed by zeroing out this block in the Newton matrix.

    Note that setting this bottom right block to zero is not identical to not assembling anything in this block, because applying boundary values and hanging node constraints (in the constraints_used.distribute_local_to_global() call above) puts entries into this block. As a consequence, setting the $(1,1)$ block to zero below does not result in what would have happened if we had just not assembled a pressure mass matrix in that block to begin with.

    The difference is that if we had not assembled anything in this block, dealing with constraint degrees of freedom would have put entries on the diagonal of the $(1,1)$ block whereas the last operation below, zeroing out the entire block, results in a system matrix with rows and columns that are completely empty. In other words, the linear problem is singular. Luckily, however, the FGMRES solver we use appears to handle these rows and columns without any problem.

      if (assemble_matrix)
      {
      pressure_mass_matrix.reinit(sparsity_pattern.block(1, 1));
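      // Continuation sketch of the two steps just described (copy, then
      // zero out); the member names follow the surrounding text and are
      // not verbatim from this excerpt:
      //
      //   pressure_mass_matrix.copy_from(system_matrix.block(1, 1));
      //   system_matrix.block(1, 1) = 0;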
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html differs (HTML document, UTF-8 Unicode text, with very long lines)
    --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html 2024-12-27 18:25:21.996964321 +0000
    +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html 2024-12-27 18:25:22.000964349 +0000

    Not surprisingly, the factor $i$ in front of the time derivative couples the real and imaginary parts of the equation. If we want to understand this equation further, take the time derivative of one of the equations, say

    \begin{align*}
   \frac{\partial^2 w}{\partial t^2}
   - \frac 12 \Delta \frac{\partial v}{\partial t}
   &= 0.
 \end{align*}

    This equation is hyperbolic and similar in character to the wave equation. (This will also be obvious if you look at the video in the "Results" section of this program.) Furthermore, we could have arrived at the same equation for $v$ as well. Consequently, a better assumption for the NLSE is to think of it as a hyperbolic, wave-propagation equation rather than as a diffusion equation such as the heat equation. (You may wonder whether it is correct that the operator $\Delta^2$ appears with a positive sign whereas in the wave equation, $\Delta$ has a negative sign. This is indeed correct: After multiplying by a test function and integrating by parts, we want to come out with a positive (semi-)definite form. So, from $-\Delta u$ we obtain $+(\nabla v,\nabla u)$. Likewise, after integrating by parts twice, we obtain from $+\Delta^2 u$ the form $+(\Delta v,\Delta u)$. In both cases we get the desired positive sign.)

    The real NLSE, of course, also has the terms $V\psi$ and $\kappa|\psi|^2\psi$. However, these are of lower order in the spatial derivatives, and while they are obviously important, they do not change the character of the equation.

    In any case, the purpose of this discussion is to figure out what time stepping scheme might be appropriate for the equation. The conclusion is that, as a hyperbolic kind of equation, we need to choose a time step that satisfies a CFL-type condition. If we were to use an explicit method (which we will not), we would have to investigate the eigenvalues of the matrix that corresponds to the spatial operator. If you followed the discussions of the video lectures (see also video lectures 26, 27, and 28) then you will remember that the pattern is that one needs to make sure that $k^s \propto h^t$ where $k$ is the time step, $h$ the mesh width, and $s,t$ are the orders of temporal and spatial derivatives. Whether you take the original equation ($s=1,t=2$) or the reformulation for only the real or imaginary part, the outcome is that we would need to choose $k \propto h^2$ if we were to use an explicit time stepping method. This is not feasible for the same reasons as in step-26 for the heat equation: It would yield impractically small time steps for even only modestly refined meshes. Rather, we have to use an implicit time stepping method and can then choose a more balanced $k \propto h$. Indeed, we will use the implicit Crank-Nicolson method as we have already done in step-23 before for the regular wave equation.

    The general idea of operator splitting


    This intuition is indeed correct, though the approximation is not exact: the difference between the exact left hand side and the term $I^{(1)}+I^{(2)}+I^{(3)}$ (i.e., the difference between the exact increment for the exact solution $\psi(t)$ when moving from $t_n$ to $t_{n+1}$, and the increment composed of the three parts on the right hand side), is proportional to $\Delta t=t_{n+1}-t_{n}$. In other words, this approach introduces an error of size ${\cal O}(\Delta t)$. Nothing we have done so far has discretized anything in time or space, so the overall error is going to be ${\cal O}(\Delta t)$ plus whatever error we commit when approximating the integrals (the temporal discretization error) plus whatever error we commit when approximating the spatial dependencies of $\psi$ (the spatial error).

    Before we continue with discussions about operator splitting, let us talk about why one would even want to go this way. The answer is simple: For some of the separate equations for the $\psi^{(k)}$, we may have ways to solve them more efficiently than if we throw everything together and try to solve it at once. For example, and particularly pertinent in the current case: The equation for $\psi^{(3)}$, i.e.,

    \begin{align*}
   \frac{d\psi^{(3)}}{dt}
  = -i\kappa \left|\psi^{(3)}\right|^2 \psi^{(3)}
\end{align*}
 (Compare this again with the $\psi(t_{n+1})$ above: it only differs in how we approximate $\psi(t)$ in each of the three integrals.) In other words, Lie splitting is a lot simpler to implement than the original method outlined above because data handling is so much simpler.

    Operator splitting: the "Strang splitting" approach

    As mentioned above, Lie splitting is only ${\cal O}(\Delta t)$ accurate. This is acceptable if we were to use a first order time discretization, for example using the explicit or implicit Euler methods to solve the differential equations for $\psi^{(k)}$. This is because these time integration methods introduce an error proportional to $\Delta t$ themselves, and so the splitting error is proportional to an error that we would introduce anyway, and does not diminish the overall convergence order.

    But we typically want to use something higher order – say, a Crank-Nicolson or BDF2 method – since these are often not more expensive than a simple Euler method. It would be a shame if we were to use a time stepping method that is ${\cal O}(\Delta t^2)$, but then lose the accuracy again through the operator splitting.

    This is where the Strang splitting method comes in. It is easier to explain if we had only two parts, and so let us combine the effects of the Laplace operator and of the potential into one, and the phase rotation into a second effect. (Indeed, this is what we will do in the code since solving the equation with the Laplace operator with or without the potential costs the same – so we merge these two steps.) The Lie splitting method from above would then compute the solutions of two ODEs, one for the combined Laplace-plus-potential part and one for the phase rotation.

    Here, the "previous" solution $\psi^{(n,1)}$ (or the "initial condition" for this part of the time step) is the output of the first phase rotation half-step; the output of the current step will be denoted by $\psi^{(n,2)}$. $k_{n+1}=t_{n+1}-t_n$ is the length of the time step. (One could argue whether $\psi^{(n,1)}$ and $\psi^{(n,2)}$ live at time step $n$ or $n+1$ and what their upper indices should be. This is a philosophical discussion without practical impact, and one might think of $\psi^{(n,1)}$ as something like $\psi^{(n+\tfrac 13)}$, and $\psi^{(n,2)}$ as $\psi^{(n+\tfrac 23)}$ if that helps clarify things – though, again, $n+\frac 13$ is not to be understood as "one third time step after $t_n$" but more like "we've already done one third of the work necessary for time step $n+1$".)

    If we multiply the whole equation with $k_{n+1}$ and sort terms with the unknown $\psi^{(n,2)}$ to the left and those with the known $\psi^{(n,1)}$ to the right, then we obtain the following (spatial) partial differential equation that needs to be solved in each time step:

    \begin{align*}
  -i\psi^{(n,2)}
  - \frac{k_{n+1}}{4} \Delta \psi^{(n,2)}
  + \frac{k_{n+1}}{2} V \psi^{(n,2)}
  =
  -i\psi^{(n,1)}
  + \frac{k_{n+1}}{4} \Delta \psi^{(n,1)}
  - \frac{k_{n+1}}{2} V \psi^{(n,1)}.
\end{align*}

    Spatial discretization and dealing with complex variables

    As mentioned above, the previous tutorial program dealing with complex-valued solutions (namely, step-29) separated real and imaginary parts of the solution. It thus reduced everything to real arithmetic. In contrast, we here want to keep things complex-valued.

    The first part of this is that we need to define the discretized solution as $\psi_h^n(\mathbf x)=\sum_j \Psi^n_j \varphi_j(\mathbf x) \approx \psi(\mathbf x,t_n)$ where the $\varphi_j$ are the usual shape functions (which are real valued) but the expansion coefficients $\Psi^n_j$ at time step $n$ are now complex-valued. This is easily done in deal.II: We just have to use Vector<std::complex<double>> instead of Vector<double> to store these coefficients.
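    In code, that is the entire change:

    #include <deal.II/lac/vector.h>

    #include <complex>

    // The expansion coefficients Psi^n_j are complex numbers, so the
    // solution vector's scalar type becomes std::complex<double>:
    dealii::Vector<std::complex<double>> solution;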

    Of more interest is how to build and solve the linear system. Obviously, this will only be necessary for the second step of the Strang splitting discussed above, with the time discretization of the previous subsection. We obtain the fully discrete version through straightforward substitution of $\psi^n$ by $\psi^n_h$ and multiplication by a test function; as recalled further down, the result is a linear system of the form $C\Psi^{(n,2)} = R\Psi^{(n,1)}$.
\[
  \int_\Omega \alpha_k e^{-\frac{r_k^2}{R^2}}
\]

    is a positive integer. In other words, we need to choose $\alpha$ as an integer multiple of

    \[
   \left(\int_\Omega e^{-\frac{r_k^2}{R^2}}\right)^{-1}
   =
  \left(R^d\sqrt{\pi^d}\right)^{-1}
 \]

    assuming for the moment that $\Omega={\mathbb R}^d$ – which is of course not the case, but we'll ignore the small difference in integral.

    Thus, we choose $\alpha_k=\left(R^d\sqrt{\pi^d}\right)^{-1}$ for all, and $R=0.1$. This $R$ is small enough that the difference between the exact (infinite) integral and the integral over $\Omega$ should not be too concerning. We choose the four points $\mathbf x_k$ as $(\pm 0.3, 0), (0, \pm 0.3)$ – also far enough away from the boundary of $\Omega$ to keep ourselves on the safe side.

    For simplicity, we pose the problem on the square $[-1,1]^2$. For boundary conditions, we will use time-independent Neumann conditions of the form

    \[
   \nabla\psi(\mathbf x,t)\cdot \mathbf n=0 \qquad\qquad \forall \mathbf x\in\partial\Omega.
 \]

    Using a large potential makes sure that the wave function $\psi$ remains small outside the circle of radius 0.7. All of the Gaussians that make up the initial conditions are within this circle, and the solution will mostly oscillate within it, with a small amount of energy radiating into the outside. The use of a large potential also makes sure that the nonphysical boundary condition does not have too large an effect.

    The commented program

    Include files

    The program starts with the usual include files, all of which you should have seen before by now:


    Implementation of the NonlinearSchroedingerEquation class

    We start by specifying the implementation of the constructor of the class. There is nothing of surprise to see here except perhaps that we choose quadratic ( $Q_2$) Lagrange elements – the solution is expected to be smooth, so we choose a higher polynomial degree than the bare minimum.

      template <int dim>
      NonlinearSchroedingerEquation<dim>::NonlinearSchroedingerEquation()
      : fe(2)
    The next step is to solve for the linear system in each time step, i.e., the second half step of the Strang splitting we use. Recall that it had the form $C\Psi^{(n,2)} = R\Psi^{(n,1)}$ where $C$ and $R$ are the matrices we assembled earlier.

    The way we solve this here is using a direct solver. We first form the right hand side $r=R\Psi^{(n,1)}$ using the SparseMatrix::vmult() function and put the result into the system_rhs variable. We then call SparseDirectUMFPACK::solve(), which takes as arguments the matrix $C$ and the right hand side vector and returns the solution in the same vector system_rhs. The final step is then to put the solution so computed back into the solution variable.

      template <int dim>
      void NonlinearSchroedingerEquation<dim>::do_full_spatial_step()
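      // Sketch of the body described above; matrix_C, rhs_matrix_R,
      // system_rhs and solution are assumptions for illustration, not
      // identifiers taken from this excerpt:
      //
      //   {
      //     rhs_matrix_R.vmult(system_rhs, solution);  // r = R Psi^(n,1)
      //     SparseDirectUMFPACK direct_solver;
      //     direct_solver.solve(matrix_C, system_rhs); // solve C x = r in place
      //     solution = system_rhs;
      //   }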

    Boundary conditions

    In order to be usable for actual, realistic problems, solvers for the nonlinear Schrödinger equation need to utilize boundary conditions that make sense for the problem at hand. We have here restricted ourselves to simple Neumann boundary conditions – but these do not actually make sense for the problem. Indeed, the equations are generally posed on an infinite domain. But, since we can't compute on infinite domains, we need to truncate it somewhere and instead pose boundary conditions that make sense for this artificially small domain. The approach widely used is to use the Perfectly Matched Layer method that corresponds to a particular kind of attenuation. It is, in a different context, also used in step-62.

/usr/share/doc/packages/dealii/doxygen/deal.II/step_59.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_59.html 2024-12-27 18:25:22.068964815 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_59.html 2024-12-27 18:25:22.072964843 +0000

where $\jump{v} = v^- \mathbf{n}^- + v^+ \mathbf{n}^+ = \mathbf n^{-}\left(v^- - v^+\right)$ denotes the directed jump of the quantity $v$ from the two associated cells $K^-$ and $K^+$, and $\average{v}=\frac{v^- + v^+}{2}$ is the average from both sides.

The terms in the equation represent the cell integral after integration by parts, the primal consistency term that arises at the element interfaces due to integration by parts and insertion of an average flux, the adjoint consistency term that is added for restoring symmetry of the underlying matrix, and a penalty term with factor $\sigma$, whose magnitude is equal to the length of the cells in the direction normal to the face multiplied by $k(k+1)$, see step-39. The penalty term is chosen such that an inverse estimate holds and the final weak form is coercive, i.e., positive definite in the discrete setting. The adjoint consistency term and the penalty term involve the jump $\jump{u_h}$ at the element interfaces, which disappears for the analytic solution $u$. Thus, these terms are consistent with the original PDE, ensuring that the method can retain optimal orders of convergence.

    In the implementation below, we implement the weak form above by moving the normal vector $\mathbf{n}^-$ from the jump terms to the derivatives to form a normal derivative of the form $\mathbf{n}^-\cdot \nabla u_h$. This makes the implementation on quadrature points slightly more efficient because we only need to work with scalar terms rather than tensors, and is mathematically equivalent.
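At a single quadrature point of an interior face, the normal-derivative form just described amounts to something like the following sketch (phi_inner and phi_outer stand for the FEFaceEvaluation objects of the two sides and sigma for the penalty factor; this illustrates the scheme, it is not the tutorial's verbatim code):

  const auto jump_u = phi_inner.get_value(q) - phi_outer.get_value(q);
  const auto avg_dn = 0.5 * (phi_inner.get_normal_derivative(q) +
                             phi_outer.get_normal_derivative(q));
  const auto flux   = sigma * jump_u - avg_dn;          // penalty + primal consistency
  phi_inner.submit_value(flux, q);
  phi_outer.submit_value(-flux, q);                     // opposite sign on the other side
  phi_inner.submit_normal_derivative(-0.5 * jump_u, q); // adjoint consistency
  phi_outer.submit_normal_derivative(-0.5 * jump_u, q);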

For boundary conditions, we use the so-called mirror principle that defines artificial exterior values $u^+$ by extrapolation from the interior solution $u^-$ combined with the given boundary data, setting $u^+ = -u^- + 2 g_\text{D}$ and $\mathbf{n}^-\cdot \nabla u^+ = \mathbf{n}^-\cdot \nabla u^-$ on Dirichlet boundaries and $u^+=u^-$ and $\mathbf{n}^-\cdot \nabla u^+ = -\mathbf{n}^-\cdot \nabla u^- + 2 g_\text{N}$ on Neumann boundaries.

  const unsigned int dimension = 3;

    Equation data

In analogy to step-7, we define an analytic solution that we try to reproduce with our discretization. Since the aim of this tutorial is to show matrix-free methods, we choose one of the simplest possibilities, namely a cosine function whose derivatives are simple enough for us to compute analytically. Further down, the wave number 2.4 we select here will be matched with the domain extent in $x$-direction that is 2.5, such that we obtain a periodic solution at $x = 2.5$ including $6\pi$ or three full wave revolutions in the cosine. The first function defines the solution and its gradient for expressing the analytic solution for the Dirichlet and Neumann boundary conditions, respectively. Furthermore, a class representing the negative Laplacian of the solution is used to represent the right hand side (forcing) function that we use to match the given analytic solution in the discretized version (manufactured solution).

      template <int dim>
      class Solution : public Function<dim>
      {

The boundary face function follows by and large the interior face function. The only difference is the fact that we do not have a separate FEFaceEvaluation object that provides us with exterior values $u^+$, but we must define them from the boundary conditions and interior values $u^-$. As explained in the introduction, we use $u^+ = -u^- + 2 g_\text{D}$ and $\mathbf{n}^-\cdot \nabla u^+ = \mathbf{n}^-\cdot \nabla u^-$ on Dirichlet boundaries and $u^+=u^-$ and $\mathbf{n}^-\cdot \nabla u^+ = -\mathbf{n}^-\cdot \nabla u^- + 2 g_\text{N}$ on Neumann boundaries. Since this operation implements the homogeneous part, i.e., the matrix-vector product, we must neglect the boundary functions $g_\text{D}$ and $g_\text{N}$ here, and instead add them to the right hand side in LaplaceProblem::compute_rhs(). Note that due to extension of the solution $u^-$ to the exterior via $u^+$, we can keep all factors $0.5$ the same as in the inner face function, see also the discussion in step-39.

There is one catch at this point: The implementation below uses a boolean variable is_dirichlet to switch between the Dirichlet and the Neumann cases. However, we solve a problem where we also want to impose periodic boundary conditions on some boundaries, namely along those in the $x$ direction. One might wonder how those conditions should be handled here. The answer is that MatrixFree automatically treats periodic boundaries as what they are technically, namely an inner face where the solution values of two adjacent cells meet and must be treated by proper numerical fluxes. Thus, all the faces on the periodic boundaries will appear in the apply_face() function and not in this one.
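With the boundary data dropped for the homogeneous operator, the mirror principle for the exterior values described above reduces to the following sketch (variable names are illustrative):

  const auto u_minus   = phi_inner.get_value(q);
  const auto dnu_minus = phi_inner.get_normal_derivative(q);
  // Mirror principle with g_D = g_N = 0:
  const auto u_plus    = is_dirichlet ? -u_minus : u_minus;
  const auto dnu_plus  = is_dirichlet ? dnu_minus : -dnu_minus;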

      template <int dim, int fe_degree, typename number>
      void LaplaceOperator<dim, fe_degree, number>::apply_boundary(
      const MatrixFree<dim, number> &data,
      }
     

    Next, we go through the cells and pass the scaled matrices to TensorProductMatrixSymmetricSum to actually compute the generalized eigenvalue problem for representing the inverse: Since the matrix approximation is constructed as $A\otimes M + M\otimes A$ and the weights are constant for each element, we can apply all weights on the Laplace matrix and simply keep the mass matrices unscaled. In the loop over cells, we want to make use of the geometry compression provided by the MatrixFree class and check if the current geometry is the same as on the last cell batch, in which case there is nothing to do. This compression can be accessed by FEEvaluation::get_mapping_data_index_offset() once reinit() has been called.

Once we have accessed the inverse Jacobian through the FEEvaluation access function (we take the one for the zeroth quadrature point as they should be the same on all quadrature points for a Cartesian cell), we check that it is diagonal and then extract the determinant of the original Jacobian, i.e., the inverse of the determinant of the inverse Jacobian, and set the weight as $\text{det}(J) / h_d^2$ according to the 1d Laplacian times $d-1$ copies of the mass matrix.
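The geometry-compression check described above can be sketched as follows, inside the loop over cell batches (phi is the FEEvaluation object; old_mapping_data_index is the variable declared in the snippet below):

  phi.reinit(cell);
  const unsigned int mapping_index = phi.get_mapping_data_index_offset();
  if (mapping_index == old_mapping_data_index)
    continue; // same Cartesian geometry as the previous cell batch: nothing to do
  old_mapping_data_index = mapping_index;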

      cell_matrices.clear();
      unsigned int old_mapping_data_index = numbers::invalid_unsigned_int;

The run() function sets up the initial grid and then runs the multigrid program in the usual way. As a domain, we choose a rectangle with periodic boundary conditions in the $x$-direction, a Dirichlet condition on the front face in $y$ direction (i.e., the face with index number 2, with boundary id equal to 0), and Neumann conditions on the back face as well as the two faces in $z$ direction for the 3d case (with boundary id equal to 1). The extent of the domain is a bit different in the $x$ direction (where we want to achieve a periodic solution given the definition of Solution) as compared to the $y$ and $z$ directions.

      template <int dim, int fe_degree>
      void LaplaceProblem<dim, fe_degree>::run()
      {
/usr/share/doc/packages/dealii/doxygen/deal.II/step_6.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_6.html 2024-12-27 18:25:22.124965200 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_6.html 2024-12-27 18:25:22.128965227 +0000

\begin{align*}
  -\nabla \cdot a(\mathbf x) \nabla u(\mathbf x) &= 1 \qquad\qquad & \text{in}\ \Omega,
  \\
  u &= 0 \qquad\qquad & \text{on}\ \partial\Omega,
\end{align*}

where $a(\mathbf x)$ is a spatially variable coefficient defined as

\begin{align*}
  a(\mathbf x) =
  \begin{cases}
    20 & \text{if}\ |\mathbf x| < 0.5, \\
    1  & \text{otherwise.}
  \end{cases}
\end{align*}

For the finite element method, one typically has an a priori error estimate of the form

\begin{align*}
  \|\nabla(u-u_h)\|_{\Omega} \le C h_\text{max}^p \| \nabla^{p+1} u \|_{\Omega},
\end{align*}

where $C$ is some constant independent of $h$ and $u$, $p$ is the polynomial degree of the finite element in use, and $h_\text{max}$ is the diameter of the largest cell. So if the largest cell is important, then why would we want to make the mesh fine in some parts of the domain but not all?

    The answer lies in the observation that the formula above is not optimal. In fact, some more work shows that the following is a better estimate (which you should compare to the square of the estimate above):

    \begin{align*}
   \|\nabla(u-u_h)\|_{\Omega}^2 \le C \sum_K h_K^{2p} \| \nabla^{p+1} u \|^2_K.
 \end{align*}

    (Because $h_K\le h_\text{max}$, this formula immediately implies the previous one if you just pull the mesh size out of the sum.) What this formula suggests is that it is not necessary to make the largest cell small, but that the cells really only need to be small where $\| \nabla^{p+1} u \|_K$ is large! In other words: The mesh really only has to be fine where the solution has large variations, as indicated by the $p+1$st derivative. This makes intuitive sense: if, for example, we use a linear element $p=1$, then places where the solution is nearly linear (as indicated by $\nabla^2 u$ being small) will be well resolved even if the mesh is coarse. Only those places where the second derivative is large will be poorly resolved by large elements, and consequently that's where we should make the mesh small.

Of course, this a priori estimate is not very useful in practice since we don't know the exact solution $u$ of the problem, and consequently, we cannot compute $\nabla^{p+1}u$. But, and that is the approach commonly taken, we can compute numerical approximations of $\nabla^{p+1}u$ based only on the discrete solution $u_h$ that we have computed before. We will discuss this in slightly more detail below. This will then help us determine which cells have a large $p+1$st derivative, and these are then candidates for refining the mesh.

    How to deal with hanging nodes in theory

    The methods using triangular meshes mentioned above go to great lengths to make sure that each vertex is a vertex of all adjacent cells – i.e., that there are no hanging nodes. This then automatically makes sure that we can define shape functions in such a way that they are globally continuous (if we use the common $Q_p$ Lagrange finite element methods we have been using so far in the tutorial programs, as represented by the FE_Q class).

    On the other hand, if we define shape functions on meshes with hanging nodes, we may end up with shape functions that are not continuous. To see this, think about the situation above where the top right cell is not refined, and consider for a moment the use of a bilinear finite element. In that case, the shape functions associated with the hanging nodes are defined in the obvious way on the two small cells adjacent to each of the hanging nodes. But how do we extend them to the big adjacent cells? Clearly, the function's extension to the big cell cannot be bilinear because then it needs to be linear along each edge of the large cell, and that means that it needs to be zero on the entire edge because it needs to be zero on the two vertices of the large cell on that edge. But it is not zero at the hanging node itself when seen from the small cells' side – so it is not continuous. The following three figures show three of the shape functions along the edges in question that turn out to not be continuous when defined in the usual way simply based on the cells they are adjacent to:

A discontinuous shape function adjacent to a hanging node

But we do want the finite element solution to be continuous so that we have a “conforming finite element method” where the discrete finite element space is a proper subset of the $H^1$ function space in which we seek the solution of the Laplace equation. To guarantee that the global solution is continuous at these nodes as well, we have to state some additional constraints on the values of the solution at these nodes. The trick is to realize that while the shape functions shown above are discontinuous (and consequently an arbitrary linear combination of them is also discontinuous), that linear combinations in which the shape functions are added up as $u_h(\mathbf x)=\sum_j U_j \varphi_j(\mathbf x)$ can be continuous if the coefficients $U_j$ satisfy certain relationships. In other words, the coefficients $U_j$ can not be chosen arbitrarily but have to satisfy certain constraints so that the function $u_h$ is in fact continuous. What these constraints have to look like is relatively easy to understand conceptually, but the implementation in software is complicated and takes several thousand lines of code. On the other hand, in user code, it is only about half a dozen lines you have to add when dealing with hanging nodes.

    In the program below, we will show how we can get these constraints from deal.II, and how to use them in the solution of the linear system of equations. Before going over the details of the program below, you may want to take a look at the Constraints on degrees of freedom documentation topic that explains how these constraints can be computed and what classes in deal.II work on them.

    How to deal with hanging nodes in practice

    The practice of hanging node constraints is rather simpler than the theory we have outlined above. In reality, you will really only have to add about half a dozen lines of additional code to a program like step-4 to make it work with adaptive meshes that have hanging nodes. The interesting part about this is that it is entirely independent of the equation you are solving: The algebraic nature of these constraints has nothing to do with the equation and only depends on the choice of finite element. As a consequence, the code to deal with these constraints is entirely contained in the deal.II library itself, and you do not need to worry about the details.


    So that's exactly what we will do. The locally refined grids are produced using an error estimator which estimates the energy error for numerical solutions of the Laplace operator. Since it was developed by Kelly and co-workers, we often refer to it as the “Kelly refinement indicator” in the library, documentation, and mailing list. The class that implements it is called KellyErrorEstimator, and there is a great deal of information to be found in the documentation of that class that need not be repeated here. The summary, however, is that the class computes a vector with as many entries as there are active cells, and where each entry contains an estimate of the error on that cell. This estimate is then used to refine the cells of the mesh: those cells that have a large error will be marked for refinement, those that have a particularly small estimate will be marked for coarsening. We don't have to do this by hand: The functions in namespace GridRefinement will do all of this for us once we have obtained the vector of error estimates.

It is worth noting that while the Kelly error estimator was developed for Laplace's equation, it has proven to be a suitable tool to generate locally refined meshes for a wide range of equations, not even restricted to elliptic problems. Although it will create non-optimal meshes for other equations, it is often a good way to quickly produce meshes that are well adapted to the features of solutions, such as regions of great variation or discontinuities.
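In code, the estimate-mark-refine cycle described above takes roughly the following form (dof_handler, solution, and the refine/coarsen fractions are placeholders for the program's own choices):

  Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
  KellyErrorEstimator<dim>::estimate(dof_handler,
                                     QGauss<dim - 1>(fe.degree + 1),
                                     {}, // no Neumann boundary data
                                     solution,
                                     estimated_error_per_cell);
  GridRefinement::refine_and_coarsen_fixed_number(triangulation,
                                                  estimated_error_per_cell,
                                                  0.3,   // refine the 30% largest errors
                                                  0.03); // coarsen the 3% smallest
  triangulation.execute_coarsening_and_refinement();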

    Boundary conditions

It turns out that one can see Dirichlet boundary conditions as just another constraint on the degrees of freedom. It's a particularly simple one, indeed: If $j$ is a degree of freedom on the boundary, with position $\mathbf x_j$, then imposing the boundary condition $u=g$ on $\partial\Omega$ simply yields the constraint $U_j=g({\mathbf x}_j)$.

    The AffineConstraints class can handle such constraints as well, which makes it convenient to let the same object we use for hanging node constraints also deal with these Dirichlet boundary conditions. This way, we don't need to apply the boundary conditions after assembly (like we did in the earlier steps). All that is necessary is that we call the variant of VectorTools::interpolate_boundary_values() that returns its information in an AffineConstraints object, rather than the std::map we have used in previous tutorial programs.
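The handful of lines this amounts to can be sketched as follows (boundary id 0 and the zero boundary function are assumptions for illustration):

  AffineConstraints<double> constraints;
  DoFTools::make_hanging_node_constraints(dof_handler, constraints);
  VectorTools::interpolate_boundary_values(dof_handler,
                                           0, // boundary id
                                           Functions::ZeroFunction<dim>(),
                                           constraints);
  constraints.close();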

    Other things this program shows

    Since the concepts used for locally refined grids are so important, we do not show much other material in this example. The most important exception is that we show how to use biquadratic elements instead of the bilinear ones which we have used in all previous examples. In fact, the use of higher order elements is accomplished by only replacing three lines of the program, namely the initialization of the fe member variable in the constructor of the main class of this program, and the use of an appropriate quadrature formula in two places. The rest of the program is unchanged.


As we can see, all preconditioners behave pretty much the same on this simple problem, with the number of iterations growing like ${\cal O}(N^{1/2})$ and because each iteration requires around ${\cal O}(N)$ operations the total CPU time grows like ${\cal O}(N^{3/2})$ (for the few smallest meshes, the CPU time is so small that it doesn't record). Note that even though it is the simplest method, Jacobi is the fastest for this problem.

    The situation changes slightly when the finite element is not a bi-quadratic one (i.e., polynomial degree two) as selected in the constructor of this program, but a bi-linear one (polynomial degree one). If one makes this change, the results are as follows:


    In other words, while the increase in iterations and CPU time is as before, Jacobi is now the method that requires the most iterations; it is still the fastest one, however, owing to the simplicity of the operations it has to perform. This is not to say that Jacobi is actually a good preconditioner – for problems of appreciable size, it is definitely not, and other methods will be substantially better – but really only that it is fast because its implementation is so simple that it can compensate for a larger number of iterations.

The message to take away from this is not that simplicity in preconditioners is always best. While this may be true for the current problem, it definitely is not once we move to more complicated problems (elasticity or Stokes, for example step-8 or step-22). Secondly, all of these preconditioners still lead to an increase in the number of iterations as the number $N$ of degrees of freedom grows, for example ${\cal O}(N^\alpha)$; this, in turn, leads to a total growth in effort as ${\cal O}(N^{1+\alpha})$ since each iteration takes ${\cal O}(N)$ work. This behavior is undesirable: we would really like to solve linear systems with $N$ unknowns in a total of ${\cal O}(N)$ work; there is a class of preconditioners that can achieve this, namely geometric (step-16, step-37, step-39) or algebraic multigrid (step-31, step-40, and several others) preconditioners. They are, however, significantly more complex than the preconditioners outlined above, and so we will leave their use to these later tutorial programs. The point to make, however, is that "real" finite element programs do not use the preconditioners we mention above: These are simply shown for expository purposes.

    Finally, the last message to take home is that when the data shown above was generated (in 2018), linear systems with 100,000 unknowns are easily solved on a desktop or laptop machine in about a second, making the solution of relatively simple 2d problems even to very high accuracy not that big a task as it used to be in the past. At the same time, the situation for 3d problems continues to be quite different: A uniform 2d mesh with 100,000 unknowns corresponds to a grid with about $300 \times 300$ nodes; the corresponding 3d mesh has $300 \times 300 \times 300$ nodes and 30 million unknowns. Because finite element matrices in 3d have many more nonzero entries than in 2d, solving these linear systems will not only take 300 times as much CPU time, but substantially longer. In other words, achieving the same resolution in 3d is quite a large problem, and solving it within a reasonable amount of time will require much more work to implement better linear solvers. As mentioned above, multigrid methods and matrix-free methods (see, for example, step-37), along with parallelization (step-40) will be necessary, but are then also able to comfortably solve such linear systems.

    A better mesh

    If you look at the meshes above, you will see even though the domain is the unit disk, and the jump in the coefficient lies along a circle, the cells that make up the mesh do not track this geometry well. The reason, already hinted at in step-1, is that in the absence of other information, the Triangulation class only sees a bunch of coarse grid cells but has, of course, no real idea what kind of geometry they might represent when looked at together. For this reason, we need to tell the Triangulation what to do when a cell is refined: where should the new vertices at the edge midpoints and the cell midpoint be located so that the child cells better represent the desired geometry than the parent cell.
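In deal.II, this information is provided by attaching a Manifold object to the triangulation. A minimal sketch (manifold id 1 is an arbitrary choice, and the circle along which the coefficient jumps is assumed to be centered at the origin):

  // Tell the triangulation to place new vertices on a circle around
  // the origin whenever a cell with this manifold id is refined:
  triangulation.set_all_manifold_ids(1);
  triangulation.set_manifold(1, SphericalManifold<dim>());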

Solutions of the equation

\[
  -\Delta u = f
\]

on smoothly bounded, convex domains are known to be smooth themselves. The exact degree of smoothness, i.e., the function space in which the solution lives, depends on how smooth exactly the boundary of the domain is, and how smooth the right hand side is. Some regularity of the solution may be lost at the boundary, but one generally has that the solution is twice more differentiable in compact subsets of the domain than the right hand side. If, in particular, the right hand side satisfies $f\in C^\infty(\Omega)$, then $u \in C^\infty(\Omega_i)$ where $\Omega_i$ is any compact subset of $\Omega$ ( $\Omega$ is an open domain, so a compact subset needs to keep a positive distance from $\partial\Omega$).

The situation we chose for the current example is different, however: we look at an equation with a non-constant coefficient $a(\mathbf x)$:

    \[
   -\nabla \cdot (a \nabla u) = f.
 \]

Here, if $a$ is not smooth, then the solution will not be smooth either, regardless of $f$. In particular, we expect that wherever $a$ is discontinuous along a line (or along a plane in 3d), the solution will have a kink. This is easy to see: if for example $f$ is continuous, then $f=-\nabla \cdot (a \nabla u)$ needs to be continuous. This means that $a \nabla u$ must be continuously differentiable (not have a kink). Consequently, if $a$ has a discontinuity, then $\nabla u$ must have an opposite discontinuity so that the two exactly cancel and their product yields a function without a discontinuity. But for $\nabla u$ to have a discontinuity, $u$ must have a kink. This is of course exactly what is happening in the current example, and easy to observe in the pictures of the solution.

In general, if the coefficient $a(\mathbf x)$ is discontinuous along a line in 2d, or a plane in 3d, then the solution may have a kink, but the gradient of the solution will not go to infinity. That means that the solution is at least still in the Sobolev space $W^{1,\infty}$ (i.e., roughly speaking, in the space of functions whose derivatives are bounded). On the other hand, we know that in the most extreme cases – i.e., where the domain has reentrant corners, the right hand side only satisfies $f\in H^{-1}$, or the coefficient $a$ is only in $L^\infty$ – all we can expect is that $u\in H^1$ (i.e., the Sobolev space of functions whose derivative is square integrable), a much larger space than $W^{1,\infty}$. It is not very difficult to create cases where the solution is in a space $H^{1+s}$ where we can get $s$ to become as small as we want. Such cases are often used to test adaptive finite element methods because the mesh will have to resolve the singularity that causes the solution to not be in $W^{1,\infty}$ any more.

The typical example one uses for this is called the Kellogg problem (referring to [Kel74]), which in the commonly used form has a coefficient $a(\mathbf x)$ that has different values in the four quadrants of the plane (or eight different values in the octants of ${\mathbb R}^3$). The exact degree of regularity (the $s$ in the index of the Sobolev space above) depends on the values of $a(\mathbf x)$ coming together at the origin, and by choosing the jumps large enough, the regularity of the solution can be made as close as desired to $H^1$.

    To implement something like this, one could replace the coefficient function by the following (shown here only for the 2d case):

template <int dim>
double coefficient (const Point<dim> &p)
{
  // Quadrant-dependent values (the concrete numbers are only illustrative):
  return (p[0] * p[1] > 0) ? 100. : 1.;
}
/usr/share/doc/packages/dealii/doxygen/deal.II/step_60.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_60.html 2024-12-27 18:25:22.204965749 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_60.html 2024-12-27 18:25:22.208965777 +0000
    Note
    If you use this program as a basis for your own work, please consider citing it in your list of references. The initial version of this work was contributed to the deal.II project by the authors listed in the following citation: 10.5281/zenodo.1243280

    Introduction

    Non-matching grid constraints through distributed Lagrange multipliers

In this tutorial we consider the case of two domains, $\Omega$ in $R^{\text{spacedim}}$ and $\Gamma$ in $R^{\text{dim}}$, where $\Gamma$ is embedded in $\Omega$ ( $\Gamma \subseteq \Omega$). We want to solve a partial differential equation on $\Omega$, enforcing some conditions on the solution of the problem on the embedded domain $\Gamma$.

    There are two interesting scenarios:

In both cases, we define the restriction operator $\gamma$ as the operator that, given a continuous function on $\Omega$, returns its (continuous) restriction on $\Gamma$, i.e.,

    \[
 \gamma : C^0(\Omega) \mapsto C^0(\Gamma), \quad \text{ s.t. } \gamma u = u|_{\Gamma} \in C^0(\Gamma),
 \quad \forall u \in C^0(\Omega).
 \]

It is well known that the operator $\gamma$ can be extended to a continuous operator on $H^1(\Omega)$, mapping functions in $H^1(\Omega)$ to functions in $H^1(\Gamma)$ when the intrinsic dimension of $\Gamma$ is the same as that of $\Omega$.

The same is true, with a less regular range space (namely $H^{1/2}(\Gamma)$), when the dimension of $\Gamma$ is one less than that of $\Omega$, and $\Gamma$ does not have a boundary. In this second case, the operator $\gamma$ is also known as the trace operator, and it is well defined for Lipschitz co-dimension one curves and surfaces $\Gamma$ embedded in $\Omega$ (read this wikipedia article for further details on the trace operator).

    The co-dimension two case is a little more complicated, and in general it is not possible to construct a continuous trace operator, not even from $H^1(\Omega)$ to $L^2(\Gamma)$, when the dimension of $\Gamma$ is zero or one respectively in two and three dimensions.

In this tutorial program we're not interested in further details on $\gamma$: we take the extension $\gamma$ for granted, assuming that the dimension of the embedded domain (dim) is always equal to or one less than the dimension of the embedding domain $\Omega$ (spacedim).

We are going to solve the following differential problem: given a sufficiently regular function $g$ on $\Gamma$, a forcing term $f \in L^2(\Omega)$ and a Dirichlet boundary condition $u_D$ on $\partial \Omega$, find the solution $u$ to

\begin{eqnarray*}
 - \Delta u + \gamma^T \lambda &=& f  \text{ in } \Omega\\
 \gamma u &=& g  \text{ in } \Gamma\\
 u & = & u_D \text{ on } \partial\Omega.
 \end{eqnarray*}

This is a constrained problem, where we are looking for a function $u$ that solves the Poisson equation and that satisfies Dirichlet boundary conditions $u=u_D$ on $\partial \Omega$, subject to the constraint $\gamma u = g$ using a Lagrange multiplier.

When $f=0$ this problem has a physical interpretation: harmonic functions, i.e., functions that satisfy the Laplace equation, can be thought of as the displacements of a membrane whose boundary values are prescribed. The current situation then corresponds to finding the shape of a membrane for which not only the displacement at the boundary, but also on $\Gamma$ is prescribed. For example, if $\Gamma$ is a closed curve in 2d space, then that would model a soap film that is held in place by a wire loop along $\partial \Omega$ as well as a second loop along $\Gamma$. In cases where $\Gamma$ is a whole area, you can think of this as a membrane that is stretched over an obstacle where $\Gamma$ is the contact area. (If the contact area is not known we have a different problem – called the "obstacle problem" – which is modeled in step-41.)

As a first example we study the zero Dirichlet boundary condition on $\partial\Omega$. The same equations apply if we apply zero Neumann boundary conditions on $\partial\Omega$ or a mix of the two.

The variational formulation can be derived by introducing two infinite dimensional spaces $V(\Omega)$ and $Q^*(\Gamma)$, respectively for the solution $u$ and for the Lagrange multiplier $\lambda$.

Multiplying the first equation by $v \in V(\Omega)$ and the second by $q \in Q(\Gamma)$, integrating by parts when possible, and exploiting the boundary conditions on $\partial\Omega$, we obtain the following variational problem:

Given a sufficiently regular function $g$ on $\Gamma$, find the solution $u$ to

    \begin{eqnarray*}
 (\nabla u, \nabla v)_{\Omega} + (\lambda, \gamma v)_{\Gamma} &=& (f,v)_{\Omega} \qquad \forall v \in V(\Omega) \\
 (\gamma u, q)_{\Gamma} &=& (g,q)_{\Gamma} \qquad \forall q \in Q(\Gamma),
 \end{eqnarray*}

where $(\cdot, \cdot)_{\Omega}$ and $(\cdot, \cdot)_{\Gamma}$ represent, respectively, $L^2$ scalar products in $\Omega$ and in $\Gamma$.

    Inspection of the variational formulation tells us that the space $V(\Omega)$ can be taken to be $H^1_0(\Omega)$. The space $Q(\Gamma)$, in the co-dimension zero case, should be taken as $H^1(\Gamma)$, while in the co-dimension one case should be taken as $H^{1/2}(\Gamma)$.

    The function $g$ should therefore be either in $H^1(\Gamma)$ (for the co-dimension zero case) or $H^{1/2}(\Gamma)$ (for the co-dimension one case). This leaves us with a Lagrange multiplier $\lambda$ in $Q^*(\Gamma)$, which is either $H^{-1}(\Gamma)$ or $H^{-1/2}(\Gamma)$.

There are two options for the discretization of the problem above. One could choose matching discretizations, where the Triangulation for $\Gamma$ is aligned with the Triangulation for $\Omega$, or one could choose to discretize the two domains in a completely independent way.

The first option is clearly more indicated for the simple problem we proposed above: it is sufficient to use a single Triangulation for $\Omega$ and then impose certain constraints depending on $\Gamma$. An example of this approach is studied in step-41, where the solution has to stay above an obstacle and this is achieved imposing constraints on $\Omega$.

To solve more complex problems, for example one where the domain $\Gamma$ is time dependent, the second option could be a more viable solution. Handling non-aligned meshes is complex by itself: to illustrate how it is done, we study a simple problem.

The technique we describe here is presented in the literature using one of many names: the immersed finite element method, the fictitious boundary method, the distributed Lagrange multiplier method, and others. The main principle is that the discretization of the two grids and of the two finite element spaces are kept completely independent. This technique is particularly efficient for the simulation of fluid-structure interaction problems, where the configuration of the embedded structure is part of the problem itself, and one solves a (possibly non-linear) elastic problem to determine the (time dependent) configuration of $\Gamma$, and a (possibly non-linear) flow problem in $\Omega \setminus \Gamma$, plus coupling conditions on the interface between the fluid and the solid.

    In this tutorial program we keep things a little simpler, and we assume that the configuration of the embedded domain is given in one of two possible ways:

We define the embedded reference domain $\Gamma_0$ embedded_grid: on this triangulation we construct a finite dimensional space (embedded_configuration_dh) to describe either the deformation or the displacement through a FiniteElement system of FE_Q objects (embedded_configuration_fe). This finite dimensional space is used only to interpolate a user supplied function (embedded_configuration_function) representing either $\psi$ (if the parameter use_displacement is set to false) or $\delta\psi$ (if the parameter use_displacement is set to true).

The Lagrange multiplier $\lambda$ and the user supplied function $g$ are defined through another finite dimensional space embedded_dh, and through another FiniteElement embedded_fe, using the same reference domain. In order to take into account the deformation of the domain, either a MappingFEField or a MappingQEulerian object is initialized with the embedded_configuration vector.

    In the embedding space, a standard finite dimensional space space_dh is constructed on the embedding grid space_grid, using the FiniteElement space_fe, following almost verbatim the approach taken in step-6.

We represent the discretizations of the spaces $V$ and $Q$ with

\[
 V_h(\Omega) = \text{span} \{v_i\}_{i=1}^n
 \]

and

\[
 Q_h(\Gamma) = \text{span} \{q_i\}_{i=1}^m
 \]

respectively, where $n$ is the dimension of space_dh, and $m$ the dimension of embedded_dh.

    Once all the finite dimensional spaces are defined, the variational formulation of the problem above leaves us with the following finite dimensional system of equations:

\[
 \begin{pmatrix}
 K & C^T \\
 C & 0
 \end{pmatrix}
 \begin{pmatrix}
 u \\
 \lambda
 \end{pmatrix}
 =
 \begin{pmatrix}
 F \\
 G
 \end{pmatrix}
 \]

where

\begin{eqnarray*}
 K_{ij} &\dealcoloneq& (\nabla v_j, \nabla v_i)_\Omega \qquad i,j=1,\dots,n \\
 C_{\alpha j} &\dealcoloneq& (v_j, q_\alpha)_\Gamma \qquad j=1,\dots,n, \quad \alpha = 1,\dots, m \\
 F_{i} &\dealcoloneq& (f, v_i)_\Omega \qquad i = 1,\dots,n \\
 G_{\alpha} &\dealcoloneq& (g, q_\alpha)_\Gamma \qquad \alpha = 1,\dots, m.
 \end{eqnarray*}

While the matrix $K$ is the standard stiffness matrix for the Poisson problem on $\Omega$, and the vector $G$ is a standard right-hand-side vector for a finite element problem with forcing term $g$ on $\Gamma$ (see, for example, step-3), the matrix $C$ or its transpose $C^T$ are non-standard since they couple information on two non-matching grids.

    In particular, the integral that appears in the computation of a single entry of $C$, is computed on $\Gamma$. As usual in finite elements we split this integral into contributions from all cells of the triangulation used to discretize $\Gamma$, we transform the integral on $K$ to an integral on the reference element $\hat K$, where $F_{K}$ is the mapping from $\hat K$ to $K$, and compute the integral on $\hat K$ using a quadrature formula:

\[
 C_{\alpha j} \dealcoloneq \int_\Gamma v_j q_\alpha
 = \sum_{K \in \Gamma} \int_{\hat K}
 (v_j \circ F_{K})(\hat x)\, \hat q_\alpha(\hat x)\, J_K(\hat x)\, \mathrm{d}\hat x
 = \sum_{K \in \Gamma} \sum_{i=1}^{n_q}
 (v_j \circ F_{K})(\hat x_i)\, \hat q_\alpha(\hat x_i)\, J_K(\hat x_i)\, w_i.
 \]

Computing this sum is non-trivial because we have to evaluate $(v_j \circ F_{K}) (\hat x_i)$. In general, if $\Gamma$ and $\Omega$ are not aligned, the point $F_{K}(\hat x_i)$ is completely arbitrary with respect to $\Omega$, and unless we figure out a way to interpolate all basis functions of $V_h(\Omega)$ on an arbitrary point on $\Omega$, we cannot compute the integral needed for an entry of the matrix $C$.

To evaluate $(v_j \circ F_{K}) (\hat x_i)$ the following steps need to be taken (as shown in the picture below):


    This is what the deal.II function VectorTools::point_value() does when evaluating a finite element field (not just a single shape function) at an arbitrary point; but this would be inefficient in this case.

    A better solution is to use a convenient wrapper to perform the first three steps on a collection of points: GridTools::compute_point_locations(). If one is actually interested in computing the full coupling matrix, then it is possible to call the method NonMatching::create_coupling_mass_matrix(), that performs the above steps in an efficient way, reusing all possible data structures, and gathering expensive steps together. This is the function we'll be using later in this tutorial.
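In sketch form, the point-location step looks as follows (space_grid is the embedding triangulation, points are the mapped quadrature points):

  GridTools::Cache<spacedim, spacedim> cache(space_grid);
  const auto located = GridTools::compute_point_locations(cache, points);
  const auto &cells   = std::get<0>(located); // cells containing the points
  const auto &qpoints = std::get<1>(located); // their reference-cell coordinates
  const auto &maps    = std::get<2>(located); // indices of the original points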

We solve the final saddle point problem by an iterative solver, applied to the Schur complement $S$ (whose construction is described, for example, in step-20), and we construct $S$ using LinearOperator classes.
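A sketch of that construction (solver and preconditioner choices here are illustrative):

  SolverControl control(1000, 1e-12);
  SolverCG<Vector<double>> solver_cg(control);
  auto K     = linear_operator(stiffness_matrix);
  auto Ct    = linear_operator(coupling_matrix); // stores C^T
  auto C     = transpose_operator(Ct);
  auto K_inv = inverse_operator(K, solver_cg, PreconditionIdentity());
  auto S     = C * K_inv * Ct; // Schur complement, never assembled explicitly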

    The testcase

    The problem we solve here is identical to step-4, with the difference that we impose some constraints on an embedded domain $\Gamma$. The tutorial is written in a dimension independent way, and in the results section we show how to vary both dim and spacedim.

    The tutorial is compiled for dim equal to one and spacedim equal to two. If you want to run the program in embedding dimension spacedim equal to three, you will most likely want to change the reference domain for $\Gamma$ to be, for example, something you read from file, or a closed sphere that you later deform to something more interesting.

    In the default scenario, $\Gamma$ has co-dimension one, and this tutorial program implements the Fictitious Boundary Method. As it turns out, the same techniques are used in the Variational Immersed Finite Element Method, and the coupling operator $C$ defined above is the same in almost all of these non-matching methods.

The embedded domain is assumed to be included in $\Omega$, which we take as the unit square $[0,1]^2$. The definition of the fictitious domain $\Gamma$ can be modified through the parameter file, and can be given as a mapping from the reference interval $[0,1]$ to a curve in $\Omega$.

If the curve is closed, then the results will be similar to running the same problem on a grid whose boundary is $\Gamma$. The program will happily run also with a non-closed $\Gamma$, although in those cases the mathematical formulation of the problem is more difficult, since $\Gamma$ will have a boundary by itself that has co-dimension two with respect to the domain $\Omega$.


    DistributedLagrangeProblem

    In the DistributedLagrangeProblem, we need two parameters describing the dimensions of the domain $\Gamma$ (dim) and of the domain $\Omega$ (spacedim).

    These will be used to initialize a Triangulation<dim,spacedim> (for $\Gamma$) and a Triangulation<spacedim,spacedim> (for $\Omega$).

    A novelty with respect to other tutorial programs is the heavy use of std::unique_ptr. These behave like classical pointers, with the advantage of doing automatic house-keeping: the contained object is automatically destroyed as soon as the unique_ptr goes out of scope, even if it is inside a container or an exception is thrown. Moreover, it does not allow for duplicate pointers, which prevents ownership problems. We do this because we want to be able to i) construct the problem, ii) read the parameters, and iii) initialize all objects according to what is specified in a parameter file.
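
    The pattern, in a nutshell, with hypothetical types (not the tutorial's classes):

      #include <memory>
     
      struct Grid
      {
        unsigned int n_refinements;
      };
     
      struct Problem
      {
        // Stays empty until the parameters have been read; the Grid is
        // destroyed automatically together with the Problem object.
        std::unique_ptr<Grid> grid;
     
        void initialize(const unsigned int n_refinements)
        {
          grid                 = std::make_unique<Grid>();
          grid->n_refinements  = n_refinements;
        }
      };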

    We construct the parameters of our problem in the internal class Parameters, derived from ParameterAcceptor. The DistributedLagrangeProblem class takes a const reference to a Parameters object, so that it is not possible to modify the parameters from within the DistributedLagrangeProblem class itself.

    We could have initialized the parameters first, and then passed the parameters to the DistributedLagrangeProblem assuming all entries are set to the desired values, but this has two disadvantages.


    The parameters now described can all be set externally using a parameter file: if no parameter file is present when running the executable, the program will create a "parameters.prm" file with the default values defined here, and then abort to give the user a chance to modify the parameters.prm file.
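
    That behavior comes down to a single call in main(); a sketch (the second file name is an assumption, following the common deal.II convention of also writing out the parameters actually used):

      // Parse "parameters.prm" if it exists; if it does not, ParameterAcceptor
      // writes a file with all declared defaults for the user to edit.
      ParameterAcceptor::initialize("parameters.prm", "used_parameters.prm");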

    Initial refinement for the embedding grid, corresponding to the domain $\Omega$.

      unsigned int initial_refinement = 4;
     
    The interaction between the embedded grid $\Gamma$ and the embedding grid $\Omega$ is handled through the computation of $C$, which involves all cells of $\Omega$ overlapping with parts of $\Gamma$: a higher refinement of such cells might improve the quality of our computations. For this reason we define delta_refinement: if it is greater than zero, then we mark each cell of the space grid that contains a vertex of the embedded grid and its neighbors, execute the refinement, and repeat this process delta_refinement times.

      unsigned int delta_refinement = 3;
     

    Starting refinement of the embedded grid, corresponding to the domain $\Gamma$.

      unsigned int initial_embedded_refinement = 8;
     
    The list of boundary ids where we impose (possibly inhomogeneous) Dirichlet boundary conditions. On the remaining boundary ids (if any), we impose homogeneous Neumann boundary conditions. As a default problem we have zero Dirichlet boundary conditions on $\partial \Omega$.

      std::list<types::boundary_id> dirichlet_ids{0, 1, 2, 3};
     

    FiniteElement degree of the embedding space: $V_h(\Omega)$.

      std::unique_ptr<Mapping<dim, spacedim>> embedded_mapping;
     
    We do the same thing to specify the value of the forcing term $f$. In this case the Function is a scalar one.

      ParameterAcceptorProxy<Functions::ParsedFunction<spacedim>>
      embedding_rhs_function;
     

    Here is a way to set default values for a ParameterAcceptor class that was constructed using ParameterAcceptorProxy.

    In this case, we set the default deformation of the embedded grid to be a circle with radius $R$ and center $(Cx, Cy)$, we set the default value for the embedded_value_function to be the constant one, and specify some sensible values for the SolverControl object.

    It is fundamental for $\Gamma$ to be embedded: from the definition of $C_{\alpha j}$ it is clear that, if $\Gamma \not\subseteq \Omega$, certain rows of the matrix $C$ will be zero. This would be a problem, as the Schur complement method requires $C$ to have full column rank.

      embedded_configuration_function.declare_parameters_call_back.connect(
        []() -> void {
          // The default configuration: the circle of radius R centered at
          // (Cx, Cy) described above. The concrete constants and expression
          // strings here are assumptions filling lines elided by the diff.
          ParameterAcceptor::prm.set("Function constants", "R=.3, Cx=.4, Cy=.4");
          ParameterAcceptor::prm.set("Function expression",
                                     "R*cos(2*pi*x)+Cx; R*sin(2*pi*x)+Cy");
        });
      TimerOutput::Scope timer_section(monitor, "Setup grids and dofs");
     
    Initializing $\Omega$: constructing the Triangulation and wrapping it into a std::unique_ptr object

      space_grid = std::make_unique<Triangulation<spacedim>>();
     

    Next, we actually create the triangulation using GridGenerator::hyper_cube(). The last argument is set to true: this activates colorization (i.e., assigning different boundary indicators to different parts of the boundary), which we use to assign the Dirichlet and Neumann conditions.
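
    A sketch of that call, continuing the snippet above:

      // Unit square [0,1]^2; 'true' colorizes the boundary: in 2D the faces
      // get ids 0 (left), 1 (right), 2 (bottom), 3 (top), matching the
      // dirichlet_ids default shown earlier.
      GridGenerator::hyper_cube(*space_grid, 0.0, 1.0, true);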

     
      setup_embedded_dofs();
     
    In this tutorial program we not only refine $\Omega$ globally, but also allow a local refinement depending on the position of $\Gamma$, according to the value of parameters.delta_refinement, which we use to decide how many rounds of local refinement to perform on $\Omega$ around the position of $\Gamma$.
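
    A condensed sketch of one such refinement cycle, under assumed names (space_grid_tools_cache, support_points), not the program's literal code:

      for (unsigned int i = 0; i < parameters.delta_refinement; ++i)
        {
          // Locate the embedded grid's support points inside the space grid,
          const auto point_locations =
            GridTools::compute_point_locations(*space_grid_tools_cache,
                                               support_points);
          const auto &cells = std::get<0>(point_locations);
     
          // flag the cells found, and their neighbors, for refinement,
          for (auto &cell : cells)
            {
              cell->set_refine_flag();
              for (const auto face_no : cell->face_indices())
                if (!cell->at_boundary(face_no))
                  cell->neighbor(face_no)->set_refine_flag();
            }
     
          // and refine the space grid.
          space_grid->execute_coarsening_and_refinement();
        }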

/usr/share/doc/packages/dealii/doxygen/deal.II/step_61.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_61.html	2024-12-27 18:25:22.292966354 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_61.html	2024-12-27 18:25:22.296966381 +0000

    \begin{align*}
      \nabla \cdot \left( -\mathbf{K} \nabla p \right) &= f,
      \qquad \mathbf{x} \in \Omega, \\
      p &= p_D,
      \qquad \mathbf{x} \in \Gamma^D, \\
      \mathbf{u} \cdot \mathbf{n} &= u_N,
      \qquad \mathbf{x} \in \Gamma^N,
    \end{align*}

    where $\Omega \subset \mathbb{R}^n (n=2,3)$ is a bounded domain. In the context of the flow of a fluid through a porous medium, $p$ is the pressure, $\mathbf{K}$ is a permeability tensor, $f$ is the source term, and $p_D, u_N$ represent Dirichlet and Neumann boundary conditions. We can introduce a flux, $\mathbf{u} = -\mathbf{K} \nabla p$, that corresponds to the Darcy velocity (in the way we did in step-20), and this variable will be important in the considerations below.

    In this program, we will consider a test case where the exact pressure is $p = \sin \left( \pi x\right)\sin\left(\pi y \right)$ on the unit square domain, with homogeneous Dirichlet boundary conditions and $\mathbf{K}$ the identity matrix. Then we will calculate $L_2$ errors of pressure, velocity, and flux.
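
    As a sketch, such an exact solution can be written as a deal.II Function (the class name here is ours, not the tutorial's):

      #include <deal.II/base/function.h>
      #include <cmath>
     
      using namespace dealii;
     
      // p(x,y) = sin(pi x) sin(pi y), the exact pressure of the test case.
      class ExactPressure : public Function<2>
      {
      public:
        virtual double value(const Point<2>    &p,
                             const unsigned int /*component*/ = 0) const override
        {
          return std::sin(numbers::PI * p[0]) * std::sin(numbers::PI * p[1]);
        }
      };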

    Weak Galerkin scheme

    The Poisson equation above has a solution $p$ that needs to satisfy the weak formulation of the problem,

    \begin{equation*}
     \mathcal{A}\left(p,q \right) = \mathcal{F} \left(q \right),
    \end{equation*}

    for all test functions $q$, where

    \begin{equation*}
     \mathcal{A}\left(p,q\right)
       \dealcoloneq \int_\Omega \left(\mathbf{K} \nabla p\right) \cdot \nabla q \;\mathrm{d}x,
    \end{equation*}

    and

    \begin{equation*}
     \mathcal{F}\left(q\right)
       \dealcoloneq \int_\Omega f \, q \;\mathrm{d}x
       - \int_{\Gamma^N} u_N q \; \mathrm{d}x.
    \end{equation*}

    Here, we have integrated by parts in the bilinear form, and we are evaluating the gradients of $p$ and $q$ in the interior and the values of $q$ on the boundary of the domain. All of this is well defined because we assume that the solution is in $H^1$, for which taking the gradient and evaluating boundary values are valid operations.

    The idea of the weak Galerkin method is now to approximate the exact solution $p$ with a discontinuous function $p_h$. This function may only be discontinuous along interfaces between cells, and because we will want to evaluate this function also along interfaces, we have to prescribe not only what values it is supposed to have in the cell interiors but also its values along interfaces. We do this by saying that $p_h$ is actually a tuple, $p_h=(p^\circ,p^\partial)$, though it's really just a single function that is either equal to $p^\circ(x)$ or $p^\partial(x)$, depending on whether it is evaluated at a point $x$ that lies in the cell interior or on cell interfaces.

    We would then like to simply stick this approximation into the bilinear form above. This works for the case where we have to evaluate the test function $q_h$ on the boundary (where we would simply take its interface part $q_h^\partial$) but we have to be careful with the gradient because that is only defined in cell interiors. Consequently, the weak Galerkin scheme for the Poisson equation is defined by

    \begin{equation*}
     \mathcal{A}_h\left(p_h,q_h \right) = \mathcal{F} \left(q_h \right),
    \end{equation*}

    for all discrete test functions $q_h$, where

    \begin{equation*}
     \mathcal{A}_h\left(p_h,q_h\right)
       \dealcoloneq \sum_{K \in \mathbb{T}}
         \int_K \mathbf{K} \nabla_{w,d} p_h \cdot \nabla_{w,d} q_h \;\mathrm{d}x,
    \end{equation*}

    and

    \begin{equation*}
     \mathcal{F}\left(q_h\right)
       \dealcoloneq \sum_{K \in \mathbb{T}} \int_K f \, q_h^\circ \;\mathrm{d}x
       - \sum_{\gamma \in \Gamma_h^N} \int_\gamma u_N q_h^\partial \;\mathrm{d}x,
    \end{equation*}

    The key point is that here, we have replaced the gradient $\nabla p_h$ by the discrete weak gradient operator $\nabla_{w,d} p_h$ that makes sense for our peculiarly defined approximation $p_h$.

    The question is then how that operator works. For this, let us first say how we think of the discrete approximation $p_h$ of the pressure. As mentioned above, the "function" $p_h$ actually consists of two parts: the values $p_h^\circ$ in the interior of cells, and $p_h^\partial$ on the interfaces. We have to define discrete (finite-dimensional) function spaces for both of these; in this program, we will use FE_DGQ for $p_h^\circ$ as the space in the interior of cells (defined on each cell, but in general discontinuous along interfaces), and FE_FaceQ for $p_h^\partial$ as the space on the interfaces.

    Then let us consider just a single cell (because the integrals above are all defined cell-wise, and because the weak discrete gradient is defined cell-by-cell). The restriction of $p_h$ to cell $K$, $p_h|_K$, then consists of the pair $(p_h^\circ|_K,p_h^\partial|_{\partial K})$. In essence, we can think of $\nabla_{w,d} p_h$ as some function defined on $K$ that approximates the gradient; in particular, if $p_h|_K$ was the restriction of a differentiable function (to the interior and boundary of $K$ – which would make it continuous between the interior and boundary), then $\nabla_{w,d} p_h$ would simply be the exact gradient $\nabla p_h$. But, since $p_h|_K$ is not continuous between interior and boundary of $K$, we need a more general definition; furthermore, we cannot deal with arbitrary functions, and so require that $\nabla_{w,d} p_h$ is also in a finite element space (which, since the gradient is a vector, has to be vector-valued, and because the weak gradient is defined on each cell separately, will also be discontinuous between cells).

    The way this is done is to define this weak gradient operator $\nabla_{w,d}|_K :
    DGQ_k(K) \times DGQ_r(\partial K) \rightarrow RT_s(K)$ (where $RT_s(K)$ is the vector-valued Raviart-Thomas space of order $s$ on cell $K$) in the following way:

    \begin{equation*}
       \int_K \mathbf v_h \cdot (\nabla_{w,d} p_h)
       =
       -\int_K (\nabla \cdot \mathbf v_h) p_h^\circ
       +\int_{\partial K} (\mathbf v_h \cdot \mathbf n) p_h^\partial,
    \end{equation*}

    for all test functions $\mathbf v_h \in RT_s(K)$. This is, in essence, simply an application of the integration-by-parts formula. In other words, for a given $p_h=(p^\circ_h,p^\partial_h)$, we need to think of $\nabla_{w,d} p_h|_K$ as that Raviart-Thomas function of degree $s$ for which the left hand side and right hand side are equal for all test functions.

    A key point to make is then the following: While the usual gradient $\nabla$ is a local operator that computes derivatives based simply on the value of a function at a point and its (infinitesimal) neighborhood, the weak discrete gradient $\nabla_{w,d}$ does not have this property: It depends on the values of the function it is applied to on the entire cell, including the cell's boundary. Both are, however, linear operators, as is clear from the definition of $\nabla_{w,d}$ above, and that will allow us to represent $\nabla_{w,d}$ via a matrix in the discussion below.

    Note
    It may be worth pointing out that while the weak discrete gradient is an element of the Raviart-Thomas space $RT_s(K)$ on each cell $K$, it is discontinuous between cells. On the other hand, the Raviart-Thomas space $RT_s=RT_s({\mathbb T})$ defined on the entire mesh and implemented by the FE_RaviartThomas class represents functions that have continuous normal components at interfaces between cells. This means that globally, $\nabla_{w,d} p_h$ is not in $RT_s$, even though it is on every cell $K$ in $RT_s(K)$. Rather, it is in a "broken" Raviart-Thomas space that below we will represent by the symbol $DGRT_s$. (The term "broken" here refers to the process of "breaking something apart", not to a synonym for "not functional".) One might therefore (rightfully) argue that the notation used in the weak Galerkin literature is a bit misleading, but as so often it all depends on the context in which a certain notation is used – in the current context, references to the Raviart-Thomas space or element are always understood to be to the "broken" spaces.
    deal.II happens to have an implementation of this broken Raviart-Thomas space: The FE_DGRaviartThomas class. As a consequence, in this tutorial we will simply always use the FE_DGRaviartThomas class, even though in all of those places where we have to compute cell-local matrices and vectors, it makes no difference.
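
    Concretely, the three spaces involved can be set up along these lines (a minimal sketch; degree 0 and dimension 2 are arbitrary choices for illustration):

      #include <deal.II/fe/fe_dgq.h>
      #include <deal.II/fe/fe_face.h>
      #include <deal.II/fe/fe_dg_vector.h>
      #include <deal.II/fe/fe_system.h>
     
      using namespace dealii;
     
      // Pressure space: an interior component (FE_DGQ) paired with an
      // interface component (FE_FaceQ).
      FESystem<2> fe(FE_DGQ<2>(0), 1, FE_FaceQ<2>(0), 1);
     
      // Broken Raviart-Thomas space in which the weak gradients live.
      FE_DGRaviartThomas<2> fe_dgrt(0);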

    Representing the weak gradient

    Since $p_h$ is an element of a finite element space, we can expand it in a basis as we always do, i.e., we can write

    \begin{equation*}
       p_h(\mathbf x) = \sum_j P_j \varphi_j(\mathbf x).
    \end{equation*}

    Here, since $p_h$ has two components (the interior and the interface components), the same must hold true for the basis functions $\varphi_j(\mathbf x)$, which we can write as $\varphi_j = (\varphi_j^\circ,\varphi_j^\partial)$. If you've followed the descriptions in step-8, step-20, and the documentation topic on vector-valued problems, it will be no surprise that for some values of $j$, $\varphi_j^\circ$ will be zero, whereas for other values of $j$, $\varphi_j^\partial$ will be zero – i.e., shape functions will be of either one or the other kind. That is not important here, however. What is important is that we need to wonder how we can represent $\nabla_{w,d} \varphi_j$ because that is clearly what will appear in the problem when we want to implement the bilinear form

    \begin{equation*}
     \mathcal{A}_h\left(p_h,q_h\right)
       = \sum_{K \in \mathbb{T}}
         \int_K \mathbf{K} \nabla_{w,d} p_h \cdot \nabla_{w,d} q_h \;\mathrm{d}x,
    \end{equation*}

    The key point is that $\nabla_{w,d} \varphi_j$ is known to be a member of the "broken" Raviart-Thomas space $DGRT_s$. What this means is that we can represent (on each cell $K$ separately)

    \begin{equation*}
     \nabla_{w,d} \varphi_j|_K
       = \sum_k C_{jk}^K \mathbf v_k|_K
    \end{equation*}

    where the functions $\mathbf v_k \in DGRT_s$, and where $C^K$ is a matrix of dimension

    \begin{align*}
      \text{dim}\left(DGQ_k(K) \times DGQ_r(K)\right) &\times \text{dim}\left(RT_s(K)\right)
       \\
      &=
      \left(\text{dim}(DGQ_k(K)) + \text{dim}(DGQ_r(K))\right) \times \text{dim}\left(RT_s(K)\right).
    \end{align*}

    (That the weak discrete gradient can be represented as a matrix should not come as a surprise: It is a linear operator from one finite dimensional space to another finite dimensional space. If one chooses bases for both of these spaces, then every linear operator can of course be written as a matrix mapping the vector of expansion coefficients with regards to the basis of the domain space of the operator, to the vector of expansion coefficients with regards to the basis in the image space.)

    Using this expansion, we can easily use the definition of the weak discrete gradient above to define what the matrix is going to be:

    \begin{equation*}
       \int_K \mathbf v_i \cdot \left(\sum_k C_{jk}^K \mathbf v_k\right)
       =
       -\int_K (\nabla \cdot \mathbf v_i) \varphi_j^\circ
       +\int_{\partial K} (\mathbf v_i \cdot \mathbf n) \varphi_j^\partial,
    \end{equation*}

    for all test functions $\mathbf v_i \in DGRT_s$.

    This clearly leads to a linear system of the form

    \begin{equation*}
       \sum_k M_{ik}^K C_{jk}^K
       =
       G_{ij}^K
    \end{equation*}

    with

    \begin{equation*}
       M_{ik}^K = \int_K \mathbf v_i \cdot \mathbf v_k,
       \qquad\qquad
       G_{ij}^K = -\int_K (\nabla \cdot \mathbf v_i) \varphi_j^\circ
                  +\int_{\partial K} (\mathbf v_i \cdot \mathbf n) \varphi_j^\partial,
    \end{equation*}

    and consequently

    \begin{equation*}
       \left(C^K\right)^T = \left(M^K\right)^{-1} G^K.
    \end{equation*}

    (In this last step, we have assumed that the indices $i,j,k$ only range over those degrees of freedom active on cell $K$, thereby ensuring that the mass matrix on the space $RT_s(K)$ is invertible.) Equivalently, using the symmetry of the matrix $M$, we have that

    \begin{equation*}
       C^K = \left(G^K\right)^{T} \left(M^K\right)^{-1}.
    \end{equation*}

    Also worth pointing out is that the matrices $C^K$ and $G^K$ are of course not square but rectangular.
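
    In code, with the two local matrices assembled into FullMatrix objects (cell_matrix_M, cell_matrix_G, cell_matrix_C are assumed names), this cell-wise computation can be sketched as:

      // M^K is small and symmetric positive definite, so invert it in place,
      cell_matrix_M.gauss_jordan();
     
      // and then form C^K = (G^K)^T (M^K)^{-1}; Tmmult(C, B) computes
      // C = A^T B for the FullMatrix A it is called on.
      cell_matrix_G.Tmmult(cell_matrix_C, cell_matrix_M);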

    Assembling the linear system

    Having explained how the weak discrete gradient is defined, we can now come back to the question of how the linear system for the equation in question should be assembled. Specifically, using the definition of the bilinear form ${\cal A}_h$ shown above, we then need to compute the elements of the local contribution to the global matrix,

    \begin{equation*}
       A^K_{ij} = \int_K \left({\mathbf K} \nabla_{w,d} \varphi_i\right) \cdot \nabla_{w,d} \varphi_j.
    \end{equation*}

    As explained above, we can expand $\nabla_{w,d} \varphi_i$ in terms of the Raviart-Thomas basis on each cell, and similarly for $\nabla_{w,d} \varphi_j$:

    \begin{equation*}
       A^K_{ij} = \int_K
         \left(
           {\mathbf K}
           \sum_k C_{ik}^K \mathbf v_k
         \right)
         \cdot
         \sum_l C_{jl}^K \mathbf v_l
         \;\mathrm{d}x.
    \end{equation*}
/usr/share/doc/packages/dealii/doxygen/deal.II/step_62.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_62.html	2024-12-27 18:25:22.388967012 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_62.html	2024-12-27 18:25:22.392967040 +0000

    Note
    As a prerequisite of this program, you need to have HDF5, complex PETSc, and the p4est libraries installed. The installation of deal.II together with these additional libraries is described in the README file.

    Introduction

    A phononic crystal is a periodic nanostructure that modifies the motion of mechanical vibrations or phonons. Phononic structures can be used to disperse, route and confine mechanical vibrations. These structures have potential applications in quantum information and have been used to study macroscopic quantum phenomena. Phononic crystals are usually fabricated in cleanrooms.

    In this tutorial we show how to design a phononic superlattice cavity, which is a particular type of phononic crystal that can be used to confine mechanical vibrations. A phononic superlattice cavity is formed by two Distributed Bragg Reflector (DBR) mirrors and a $\lambda/2$ cavity, where $\lambda$ is the acoustic wavelength. Acoustic DBRs are periodic structures where a set of bilayer stacks with contrasting physical properties (sound velocity index) is repeated $N$ times. Superlattice cavities are usually grown on a Gallium Arsenide wafer by Molecular Beam Epitaxy. The bilayers correspond to GaAs/AlAs mirror pairs. As shown below, the thickness of the mirror layers (brown and green) is $\lambda/4$ and the thickness of the cavity (blue) is $\lambda/2$.

    Phononic superlattice cavity

    In this tutorial we calculate the band gap and the mechanical resonance of a phononic superlattice cavity, but the code presented here can easily be used to design and calculate other types of phononic crystals.

    The device is a waveguide in which the wave goes from left to right. The simulations of this tutorial are done in 2D, but the code is dimension independent and can be easily used with 3D simulations. The waveguide width is equal to the $y$ dimension of the domain and the waveguide length is equal to the $x$ dimension of the domain. There are two regimes that depend on the waveguide width: