~/f/dealii/RPMS.2017 ~/f/dealii ~/f/dealii
RPMS.2017/deal_II-devel-9.6.0-1.1.x86_64.rpm RPMS/deal_II-devel-9.6.0-1.1.x86_64.rpm differ: char 225, line 1
Comparing deal_II-devel-9.6.0-1.1.x86_64.rpm to deal_II-devel-9.6.0-1.1.x86_64.rpm
comparing the rpm tags of deal_II-devel
--- old-rpm-tags
+++ new-rpm-tags
@@ -10400 +10400 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html c78adf8783cfa81a27da775c05e6b7ba52f931015e0e53eecae032f722941e92 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html f7fe8cd59ff55fc156cd89d052777a77a4f95ac109c4b5353990fbc3f890d3ac 2
@@ -10403,3 +10403,3 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html 99dcaf182e6179046bd273476022de4e8e0b180c94e593ee1853a88ef2996197 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas.tex 8c9ee69c8efcfb2ee2d3ba225895e65307fcce4f35e8eaa24f41fd423ad272b4 2
-/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas_dark.tex 9537dba197eff073bc8224d4702e902bb5a83a70502c386a221a3b4322a930a2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html 4462042d2bc8672680238bb44309b344392e4564ba2550594c5575408f751ebe 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas.tex fc21e29b041e3756ca6d018a27e168bafb105b6afc6f0a06de6a6e434a355506 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas_dark.tex 8b5bfad4249a3079f8c81896e0f819970cf8e37484e6f6de05ed8f9b36f57934 2
@@ -10575 +10575 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html a632df96524eafea9a8138336ea418bbd387e9fcb733925344531b9ead3d3a53 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html 5ab597745cfe33ebe18d7b85fcc00db85dee9182b0bca4c2fff933b5e9ad6b98 2
@@ -10577 +10577 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html 18ea8c7cbddc07d8a714c160dbee73b0e37144d6efcff54ba6911eff5fc87cf4 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html 5937854a598320788655a171607ad126f06b2721705459fe9f50ebcaa77c64f7 2
@@ -10585 +10585 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html dc07f589f00c9e36e66912f3ca56ad76ff958c8670b91f1e531fc4fb07416b1b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html dd472b567d7635722a8819a3276534a0467918ce0932dae7bc42423f2ebe05e2 2
@@ -10587 +10587 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_2_1_and_8_3.html c04d370408a2bbd795ab6a2f353e95e281c5bc9df0881ccda301a98332c92edd 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_2_1_and_8_3.html 1a14392eddafb4a2968646a3227c45385461af69af41f37fca0ed266d492b1bd 2
@@ -10596 +10596 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html cc5bd0d91485336e71684876a59249f9f70303b6ba8c1a723652db0f35eeedf7 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html 23f13de9868ab558c240d0cf001279814cb907b25eee7fd79e7a111423ebcf9e 2
@@ -10624 +10624 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html 671a0a452656178377bfcda45a579ae78f3f4f409c23fdf238a766a4cb02776b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html 0e9b545996a447212a15ed48fe4076643f7692d78d01c36bab5d19c484b8e16f 2
@@ -10642 +10642 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html 4512bc229ccafb084626c7006af7934e7bcc85c040c505d7fbc138a637f5aafc 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html c6031ca1522badde25825fecf3857930c413658bbe9854abf88ccfbc21d435af 2
@@ -10659 +10659 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html 9a0887da74400a6a6230cba3615f8ccc004ddd319fea6ebc6643b23c5cc49c25 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html 9826b6b28a3c6e171dc816c93366dc5d4ea91432ca2b303eb92ad7c3afc53e4a 2
@@ -10696 +10696 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html 96c03a49876eef542e23c8f556edb0ea6db302ff18ef0fe5a2e0c893ab67d5d7 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html 2994463330bba94f32c34e962fba263a9048db9e498cd53a945facf72563a331 2
@@ -10699 +10699 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html 6f0b75e7e64c4906e75e85be547120e6dddd1635f179594e318451da44d61144 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html 4d8395940cdd4d6ddf0b0a355743bc5691082634e44acbedd86dca75384626aa 2
@@ -10705 +10705 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html 5c57b19bec88c19083e5f2e789cc4549147fda1813a74af84c32b36515c36ff8 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html 14027d8907a83df0b2c6982237fbce286ea12ba37683e2365b5b12a1963f0a31 2
@@ -10712 +10712 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html 16489875737a1f4265098f9ef22fc9c4dfae892db325e48cd7ab6ff07e845b24 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html 929f289620b33e98351ddd9700451d15f3750efef522187622ac08f136de0113 2
@@ -10718 +10718 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html 1b07872ff7ff84836aca064819bcdb2b38affaf8ac276412a010d61bd88572f2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html 4f9874ff4b4d51e43196b4d7093d40cc4c4b4564e247b9d601fad18fc834c83d 2
@@ -10724 +10724 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html fcbb3c3c55164593930a8ea79d5805b11744575635e7aa3c7c0a9c8c9db1fbb9 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html a11ddc935a1d93262ca9befd11505b8d8b3cb6348267d911c438f98f3371d409 2
@@ -10729 +10729 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html cbf1bf2fc5ba959a9a228d1300e3632b7cf5414c709aa2d329ac260fc41829a8 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html d2788e87b8623ae1cc9df620482be0192c8a9d8626890d77a2fdc46982965a9a 2
@@ -10742 +10742 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html 198781c66bb7eb09bc73a4e9a4095940b0abcbb1cb587e971e1ee7edebd8a132 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html 02af0d72b4fe3148015dd217695881932e727fba7a679655376552314522fff4 2
@@ -10744 +10744 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html 978e80a4cf481b4cc65fd34c629f8f1ed6bfae42b9aae82cb713cbc5dbb8b751 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html a10a0db23b96cdaf64baf045dd2161cace78abf4c0e910919e6bfaf3ceb2de69 2
@@ -10754 +10754 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html e0925431d03a3f9f883d9c0e2a1f9d949b2ff56f843bd6da6dfc8029f50c99bc 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html 44b0807a5ea194210832f7da28620a82a397abfb4dd2123b9bfdcd1527363cbe 2
@@ -10756 +10756 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html b0ef597726ecb10ff9572c7ad0bfec0048c5db33773ea4f9dade4209b66e3248 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html 839e4f7f3760d8ef0d20b04b4b6ff1b399a59cb25cbe457a59e7512b5ac1d1b4 2
@@ -10760 +10760 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html e702bc6ba73453bb80dbb4afb3c3fad266eba9309e3653a1f77f2504c56f4c35 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html 73cc65770cd463ef2d6d1c4e5fe6b07b80a9f1a56dca8ef4fb524664509d5b1e 2
@@ -10774 +10774 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html 159a08da5a62f5b992a2a154867e3b6263febee42b4ab03ea34b6cdbf7cdeddf 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html 1554fed7a3dcf30a126edac4898f0bff6619715aebd356460f9aa91cd6c1996f 2
@@ -10777 +10777 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classCellAccessor.html 9d3c351922666ebf15274d29253efd312f5212bd1a57b24367f7e758fe374e95 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classCellAccessor.html f6cea994fc288fe376fc002dba2e20a9d83761b2b5db8bd512e6f3c46af4950a 2
@@ -10787 +10787 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html 37366fc1946c80ff06af344d6437e8a23e0a36ae685b72021fa506534ba59ffe 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html 11c1f36715f6b3c6ac47af63e397cd3cac93caf59175ed90d85ef3c3cc4e113b 2
@@ -10790 +10790 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html 7e20b00d9f980bd16392228b2264725f0eabd4975e04f502f29effc6ad6f12bc 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html f204eaa1e65065b7bd8813b5894f46667ba12e09645981dd934bb7a2bd733e1f 2
@@ -10806 +10806 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html 4d70ce5f3246a943fb60e9584f5769958e701e429bd3bef1f26e6b5907635407 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html 30455a3251c33d3161cf410bba421918a0dbd0acba52d63a324609ce50221bcb 2
@@ -10819 +10819 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html 913ce8879074ebaf3d50fb1f632397ba5baacd5803a5446b4c6a871160186bc7 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html 4324b4bf7e63ce63ae1687029b172ec59944222b2b045f6cd7c53c6e4dd85a65 2
@@ -10833 +10833 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html 24ca615e436eaace2de9a194ab56cc3a1787128f05b2af161e68d34b370718c0 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html a3741c08f6b570d48665e2252f3249b5459d859dffb36819ad990adecb83fc62 2
@@ -10862 +10862 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html c8dd0c208ee9d57ae4c35bc13a02c37867c915672f955508bf479f63d6e5f0ca 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html 9c9ffd3a41098999bc4301a67178841256d0cf2d0bde427505f261c71326b6eb 2
@@ -10867 +10867 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html 6147e0a1f95363bcf7e6cda43fc41e0b7ca574c79c8b5315fce6f8696da5cb21 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html 00ebe29f33c117967d5fbb2386ce861f020eafe368e63c55221af61dc833bf6f 2
@@ -10870 +10870 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorVector.html ff70e2e6f09966b1bc8a84e1b88e1fccea9073cae17ed1a82af4b1cbf26cbfaf 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorVector.html 51243ae1440adbc60cd2b50fdc316f3b83926d7d0508cc0af9287b1f7f80e570 2
@@ -10887 +10887 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html 507953d9670cd9baf3475cd54a9b6229d57205917e5cb020b2f056d530359b5b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html 4128d138dbd888ad136437d8f2831ce6f59eb3b907944ec8b809a0ac43e6fe7a 2
@@ -10889 +10889 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 64ef1d21618d8df2ca4da2119adc81802a2d191d8c7c88ba9a738640eeb21e0c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 898503658b309e665367f2740a49b18ae8385b17e17011dd23cbb9b87a5f01a8 2
@@ -10891 +10891 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html 8bacf10076c88a96b90cb80937b7488889bcc2aae1d19f9e897b1c33053dacd0 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html 5702ad4ada1c40bf5144938cf189e4dc2e927bba17e8032561cac5f087fc3f03 2
@@ -10900 +10900 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html 5412a6c9ae8cb43d38aa00e5319595dc2dd10d97d0cd65acbe6ba7f999cc1253 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html 8e6cbea72d742d0c5dadeca489595ef5ec48eb9b3a0c90ae7f0832be03cf6d90 2
@@ -10912 +10912 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ScalarFunction.html 38ca5bfb074c0d6439d109897f9ded009f0e69c56d6bc3ad1b5519f3193b1ae2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ScalarFunction.html b74bc5b3516a014560fad74786bd0a04b099d1f4e02cc7615ae0158cbd3dfc53 2
@@ -10915 +10915 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1VectorFunction.html 2b463ac50ee859116b25d84040133f842f5ecbe1d25e00efeedbe6bac1aded64 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1VectorFunction.html 898e41f4589f4c924c852d50c74c6540041ef5dd024a0ba66de99ffdaf9593fc 2
@@ -10927 +10927 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html 1ca8ed9c29976f160fcb07a94b2decb2b4c112d426dcc4696a0e4a67b23ccfa3 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html f90e84636c0ac08f4f599b6f58cfc689cf2a75069594bfa875478cf35306039d 2
@@ -10938 +10938 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html ce5ad510ca02db71e4637aaab69cb67bb1a757d496fa4958093acf4cb3d2795b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html a432dc18923d67a320247af4a42e74069ce87502a2ab5d426360da8a5ef2350f 2
@@ -10949 +10949 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html b2a3488aa6c09f512bbde97d0d712242fcfbae90ca1d430464467d13181e9001 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html 8cad695b56cd83907090a63d0a8fa9e39da47b28274c757a0e623dd08c589763 2
@@ -10956 +10956 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html d06c1fa15a051caf9f9c6bdd802a02d277f52984fb91573943dfc553273de090 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html fb50ad1731f77269f15c3d49a1f63f385df7dcfaca1509b3f457a37a0b07ec86 2
@@ -10959 +10959 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html 69e7c473aa662c5429ad7130a97992cf7daf7f3e74b9d1caab50772440f92535 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html 8e8c17df40e5c7bbf42da9559671f6a1436307f905d2d9f06f22c8ec2cc68829 2
@@ -10962 +10962 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html 992ad229af357faf0763a9c469ca345962daba1089bf1fe8c414ae095c8f76bb 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html 1cf63d8ee0d1566e753fffa6dd232f5986494d4aceaf3870ce692df7f81e955e 2
@@ -10970 +10970 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFECouplingValues.html 2e5d09f60a908323d6bf96ea8fc7acb88b0e924658e09290263def8947ec6f77 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFECouplingValues.html c0e24dcf7e7b63b4a1b986d4320513c8f077c0e1d97749cc06e5b30c42d2e6fa 2
@@ -10972 +10972 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html b6da72d08368acb9dfc760f24b268ac726a51c590a6feb240b2bdea7a90546d4 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html 85712d332fdb9903cbcc34990c4875b7b67e58fd665931bfe0289a652bc87c6a 2
@@ -10974 +10974 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html f1106c772c0e89db0a8df8a0ccc4f48a3a2795562ca8562f662b99060c1683cd 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html a6a9daa10546b18b44cd262c23d23027b0d9c0be23b4f8f390cd5d3a1be41439 2
@@ -10977 +10977 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html 7583b6e8e82c53b35093682dd1a6bc5508eed7d0c579549faef97a2776ab3e83 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html 48327ccaa735c6bb3cb427e3accfddb0d3b9d42e8dfcf18ffebbc66155a971ac 2
@@ -10981 +10981 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html 148425474381ad6edb20d701b02e47cbd3c1e7ae461969c47c5ced6d7c0b0344 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html 22a9d8efb900cbc571ff1ae097888c16c1839f8d98fb32044eb83a9b0d0502b4 2
@@ -10984 +10984 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFacePointEvaluation.html f1f93ffe8df6963605535256034bfc309fbeee39d3de4f4f82180436303fc00b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFacePointEvaluation.html f39480ba18c8febe113863537a22773a057b0b3c77fe1e0572b914c26c99a240 2
@@ -10987 +10987 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html 5e6eb28ef35e741a437764e4a6287bbf6dd704fa585aaa20af6580e5cbb75da0 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html 4673dc5cc936cbd3249d88734a5389f70b4a817e0934f7635d308d90ae920338 2
@@ -10989 +10989 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html 359cdb8177535d6a3d05a4e1b2cd822bf959ddf92291ddd741ce8a5a4a8d0a8d 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html b2ed820545b5c1e9035aed3405f377757023e1085a7186d94e7cdecaa0859a72 2
@@ -10993 +10993 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html c15c8f1b57972ebea82f781d910c90a26b241f3cd9db3fcde719c813172388b9 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html efdb78ad687003ec6cec93367b6c240e4e1f42c211af4a33ae09a4f991476498 2
@@ -10999 +10999 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html 83e4a420721a960467d3ecc9f010e3622ae40ac0602134c26a626e2bb4c771cc 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html 28f2b78ed63ea6a1fd60e44f858466fd0505c1cb3d96a9a96c787cdbe07578f9 2
@@ -11002 +11002 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html 8462c0460617a817510e3c441b7724aa5ba8c809c73f04bf29702ae1adc8342a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html 2e8e14ed0e1c3c2c9786f9d10f2a5dd88cc4e2a3888b6f3c5e75ad5be69d1496 2
@@ -11005 +11005 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEPointEvaluation.html 361f79f24457120dcb7937d7a8990d3bfe9e8bcbe8a92eaf0e60de046a350e93 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEPointEvaluation.html c3c4bc0a38ac301da41632ea2868887742fd09a702c615b71ddbdcbf5f3029a5 2
@@ -11007 +11007 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEPointEvaluationBase.html 4fb6db5d7eb84be27409b33f58e99c404c07c5964a4225868067f442203ef8f4 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEPointEvaluationBase.html 79979c2da891f8357833ce5e9213ba9a6e26f27dc5fffad4bed02c078af3914a 2
@@ -11018 +11018 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html 97363c37a6af927407d43bb12026b7fd48282b1d579ad18d0e37a8dfe3071b48 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html 541a275162626349041b2039086a87c8dc97a4ce2f82554375ea61fcc14a46ee 2
@@ -11021 +11021 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html 91a53957f8ae3fa5e215449a9777b625e94821f415e7a3728be0184218225585 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html 7c4e85d4da9f4d7beb8647b2d1b12f30508e042f591f2b5b90cf9102c5144888 2
@@ -11024 +11024 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html c56f4024175f4d0c2e1d9de06d2712b500b86db19047f0958704ee0ad7313e71 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html 36565a9c7cb795a884d2104320914651bcbc5a949ca68937ec398d715b76030c 2
@@ -11027 +11027 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html 7a8169345fdfd82bfd36f8fe940c95f4baff6ad79106cf116c152bf8f708bcec 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html 0b4177e6105a9066fc82cb9dcb2ffc671f07294dfd392c74196bbccf6165e204 2
@@ -11039 +11039 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html 11ee19c06947880bfc24f35c1ced3f9787d950f7a6491fa5be785c556ce0aafc 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html cd6510b477d2b9ae7a1d04b033555af40c1cc0eec4bdc6259a827eadff2df992 2
@@ -11041 +11041 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html 1267f1ebc75b9733326bdfae280e15d5e3fe0fa280d2dcf4463b8e79b5a508c2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html 5fcb8d38be11ee0240f83046402c245d1d7af466ef09727908f56c66cd3ef97c 2
@@ -11046 +11046 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1RenumberedView.html 20751b28a6e64a37a01180ff636f6ad8f9f2f86068d718aea5ffa3814d177233 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1RenumberedView.html 48a22fdf17f44d3b8afee544942f471991c47cd9288524f58f46d0d406989ff7 2
@@ -11048 +11048 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 74eb1f3dd103854c788f784a75690b21ae8856a70c39dae3459f91e8a8b635e8 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 477f1c9d4a93efe236f6fa7fc497fea109b366ff725180fbb5b2bae6b5cda5c8 2
@@ -11051 +11051 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html 13e5cac97de05a1a262f4c6bda17146a85b0a3c564786285c085dfd6ba2b8cc1 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html c5d82306af7b3fd16324c15873ea44d04d7b263625fe3149272855033ea57f4b 2
@@ -11054 +11054 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html 4205224463f0d903240f72beab0035d5d2e884d3139be60befa1920ba11d5e51 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html a692dcd7d3001f244a7bc7abb34095dc31e8bc87bbe1efa6f34bed80f4c987ae 2
@@ -11056 +11056 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html fbdb5d7a500a033227e8a2d2019e8fd6e9bf865a2310590ee9109e29ad4c576a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html 3e0e107e8037a7dcb68f4306ffa94005d5f13d6cfe4dc418fcc47a91d42256c9 2
@@ -11059 +11059 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html d301c26238f45d5c3e97e7d6dd2426df9cf078586bf9d42e7c1d09a2f2f711cf 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html 119064929b206a2c5f9a16794552079c9730872320b432e41c3d05836e29ba99 2
@@ -11065 +11065 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html 91b716953929e9c54d28bf301da879f77051c403aabd0e67ecc039531210ff3b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html c42ab697cb8be3e48878bfaa4a22e4e9571886cc4cc2ef8499ebc85d390ff2a1 2
@@ -11068 +11068 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html 30575597ba09781dbca71e76b2ca33ed88d80179414f2a798046e23849adcd82 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html b3d953506a85506a5e213d3f06c6bf82ec6b1e0f50ae3c70b5c4f234ef10d262 2
@@ -11071 +11071 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html cbba418bde75bedc18488a6d72d6b3f0378eb8e08712a9e2ead06501bb40735e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html a5a6b2ff73f85f0793b71ef4fc115d3747356f922148b6d9582012917b7cb2da 2
@@ -11074 +11074 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html 392a5baac76ef7a73a77e64b7d49cc71b75d6c3200712184d9af7119f98ca4d2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html e3273eaf3e6d632d9026894f21ff08b67a141b3e245c18681f6ee61b124c7b0e 2
@@ -11077 +11077 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html fae4131f1eefe497676081da87d5344ced7aa44d1344d532cfc722a29fc69bc1 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html 48f22d9f782eb34046fffcfae501924ec0b392859195579faa2e64291a2e45bd 2
@@ -11080 +11080 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html 85c6d3a59bdd901df83ac045b372568c345e1cfdfe8f68d5b41651868bda8bf5 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html 43711569f54792511cf1a6188231658b6504cc32d39420e628a2d49ef7b77464 2
@@ -11082 +11082 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html 23ef8eacf90be8171fd45bbfbf62b8150d410cb05261b62bf3e1bc1a42cd4ba2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html e48c281e21f4802427a8d413f72c0e63440c86a3def13df8741356891a83743a 2
@@ -11085 +11085 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html 57857543caab919ca0083a431a2a8e1666149408945e95f42298d8051f1e61e5 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html d26f17178772b69011cf34c713289db2ff3b290c69db790cfc07e9b698cbebff 2
@@ -11089 +11089 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html c6adb48a32f542c4205caad75a8b90cc4e156a8c465fadaaf4cdcf7b6783581a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html ffcc9795b830fce9ad0bf58f1e3cd036762d1944c5230be117beafe385aaba12 2
@@ -11091 +11091 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html feaf491bad5cc5055594ef7ed9f4c1db380f7e8da13b700e52ae33b69b3323c7 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html ff5b638c0dbb1ceae5faaf6d6cbc57576d7b0e54af09e5b59c34c7c2bf44c167 2
@@ -11094 +11094 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html 0c972c42b32fe9cdaf3cfced7603a3da82cca649fa15b8d5619d1f66235d20c9 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html 9b05e2a1f5e377d61b6099d0bd66a4c0a75e90a710f62ed62a89f5a2b51b0be7 2
@@ -11097 +11097 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html a833c5f076032aa288ab2324529b38a8b6d94d1aca03604acd826db33e5c7b8b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html 6479a77203f0851add1cf291724c131fa6d7e35b80a789dd57aceb7b2657afac 2
@@ -11101 +11101 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html 2b71eb4a22498ac344b459eb6f9d9d033dbae0337878125152b878aee8f3336c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html fc6120c21e13237f64e242e50c7ff602dc0c36cde0b10f1b35781d3fb79e4cd0 2
@@ -11104 +11104 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html f416e9d6d027da5d3344c1df1f5110f52d951d9197aab1e5842134343937127d 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html bafe093f8cc2d96de793ca78b1762ab20db0546ce50c4ea953c0c584afce5644 2
@@ -11110 +11110 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html 0dccc711ebf837ed354fc3d82ebf63b53fe52718ef92a519a1dd8b651519bea4 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html 697e9d5c0824d3faf972a18bce2df870911aaf5952d57507d2102c83945b5099 2
@@ -11116 +11116 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html 1001649d5249cd11157baa8bbc6ace22ca4bc94c93a3de79d3d5295b18db9918 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html 168684b4f1614ae21a79eca3afc8be30d31baaee2e19e78f6928e627b5635375 2
@@ -11118 +11118 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html 9781d133f08121f787f21f42077a651718d8c741bc685aff9da926d4b6d9d917 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html 7b0c155fa60a0ca563433ee5c520ebfa4b5e10ca502ace331d1306b73f2527a7 2
@@ -11122 +11122 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html 2f948443b7eeaf70c58f368d6ca9aa6338ecb89596fbcd035b1a2e06ee635fb9 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html 9b502d3de36aff1a4e443320a5150d02aaa24227fe636942a4d6b516846bfe3c 2
@@ -11124 +11124 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html 3784ede867cb6022436a419b23aeb55ee228df6fb928d698172eca56d4f1958b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html 106d014f4d182d6cafccccf72c2a789b85f1976d78d3d4eb147c849cf0d37ee5 2
@@ -11128 +11128 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Hermite.html 02c25a1b848f061f2e79c9f2d512a98d824e31d4f234e23a7a49789ba61b0295 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Hermite.html bf845da340fde9d489b774bf338da465a12eb255d50daa8384a6a2814db6abbb 2
@@ -11131 +11131 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html c3748873549a27d6182e70f7108c85ea6e7472d32a03e7a9c42d3707c242d17c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html 6b69af78aaca0a00160d7bbd5c82199e62677bc4386db69c33ab6c135160e4e8 2
@@ -11133 +11133 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html 7be29f4c7e18e8484b9e7f6634a10df40e32a3a3334f98d862565b47ba685f9b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html 8c9ecfbfe671f59db14ef6306306ecf0ec69c0aeb03b3fb369f1bb3c9cb9a01d 2
@@ -11135 +11135 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html 2318595b69d89e1c0ac1fe4c84f23381ebce7d18ef91e04ec6d04aa1ea7396da 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html 77d15f9afa40fa44724d7989a1a4a0c43a8d1a978c41db8d778c051da746cbb1 2
@@ -11140 +11140 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html 75441a209fef0db6224a4c4f6284d99d338f44021cb02b006abb9e513373c7d7 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html 22c298add528fd45216318aca86d8929ca0618eb6375da603796abc20da7b5e2 2
@@ -11143 +11143 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html fed5af95a77ef69db7ca4aadb4155a7b9ae28a1ce9b18cab26d6389b50be8ace 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html 510e64930e21e2cfe17c4451c77bc5f76e6306d831ef7907a8ac11c03be760c5 2
@@ -11146 +11146 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html 3cee6ff1adff19e2adc434cdbd16852b9d925160cb916450424ff750b49d1541 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html 2afcf95da8cdb880a87e323678720d9b25692f8be875ab7e96d572e711d79970 2
@@ -11148 +11148 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html 7921f5077299cf0dccf58328113812afba108299de4991536d603750c2e205c0 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html bcecaa7aa5bcdfe8789fc70158acf280206053b9438a17ec7d1ac3d00edbcaae 2
@@ -11154 +11154 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html 1d1c266a14d26f8df2a653e2fc4c34979cdbce5aece91e49e3221d66518e927f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html 5472b3ee351d279e425e7179af3530e96e484069eadefd4b8376f06b520cf8d8 2
@@ -11164 +11164 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html bda07fcece1cb7866a4e0a55a4fce877d710d546d65df7d183a89dd9599309bd 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html 7308fa517d6ad5567ec800b0a25c1fe7f10f37e9b355a30743330624559f6b3e 2
@@ -11167 +11167 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html e5fc6b06cf70305862b14d702b218a971081c6da45ea047b2ee6039585bb2162 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html c78a8a6e9b68f94b75d2e9b726f3ad3434a8f9919d0ad6fcd5938903cf3bcae4 2
@@ -11170 +11170 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html 7018456062235599001a729733117566ca79a547fc287808651ee7c576e82e0f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html 8ff5df98e94e2531752f9dba48c7d1268ed13c927f3cc191feb92d7b75b11ebd 2
@@ -11173 +11173 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html dc47e2affba63d3f810d9cb36f1ec6dbc78ede6496fccd286d604f06ebf61a36 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html 70417587f203dc5a403f5da548b2768ad486770dc9135836aed65ca14665dd6f 2
@@ -11175 +11175 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html a162c44bde70f126eacabac030070132cefe66c722bd2b460311cb8f67599b7f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html 199fdaf313de45dc1986b9bc1ae49107cade0faf698a35a99765631a5625d13f 2
@@ -11178 +11178 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html 9aa4e34a2ffd0b4f06232c7f012e4185d5e5e2c84eac44b0985d8bb9c9b3b133 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html c4fdf09d1d244844bc3e39cc2ce32239951e76c29deebea3329fa955a7c025aa 2
@@ -11181 +11181 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html 764e8d14e5d436834aebfa4c3222609064e359c2aa9971683d8e44ff3a6ba79f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html b6adfb020ead8e483694138fb096e533d788c3c96770dc01fff034c742d649ff 2
@@ -11184 +11184 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html 51b8fc903bc11a464712e9a0b8e39f185b418dc2880807a028265e907580e31e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html 3c274c994535150f08e2729f84c875916644195cd76c1cd4a166a8ca7faacc24 2
@@ -11188 +11188 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html 81dd5c0ca0003af689dc501e522abe69278ea72903b8061507aa3f57d09dc681 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html cce0cce664d44f425d825d9c632d7854f24273754362956852b64e048173511c 2
@@ -11191 +11191 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html b3252fcfcdb0ec8703d9702f08a2fefba1b4c7924d4d9ee49b12ff08be24dbce 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html 6ab4c4c4f7156709d32696033f4bf4d170ac64f6b41e6c70088a2946f26f8c16 2
@@ -11194 +11194 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html 9cae1104918386e7717f2fbe95647d108762b3b9306530e09cda5096807ae61a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html 8206ec361ae5fbe74cd907d8ff1109bde50a0b7fe146f134c99aebb2aa018345 2
@@ -11197 +11197 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html 22066a5285949d98247440241edb94db9a5be1ba29c4cb1167d8d8cf86eab9f3 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html 1daa3c95820668f14179be42009db2dd4d4163d1dace9198a61dd412d61daac2 2
@@ -11199 +11199 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html 271de6c5aa4764ffba395bcf31f9d89037a274cabaf60183c4ffd4e97e38b9f6 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html 0ead7071d969fb8cb20715bf9b5ed77af1557ce13e941bea90c2b812c3a7a539 2
@@ -11203 +11203 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html 6a2376f43d8509b11b812e9fc6f7170de490ed1d83d31da035c3dd8b04bc2535 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html 64e5abf0ea61caaeca665f1d13c5b95d2cfa97f317c2e8b13e121884d38a3ceb 2
@@ -11206 +11206 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html ace30f892f1707b36d980e0dd807d0baa70fc25b9fc45666c3a40e934eaffc02 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html 819d36745aafcef6325ba7fe986bec5e4d11e9ba6d61ac73f3df41be7db14a37 2
@@ -11208 +11208 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html b2f22c89de19e6a684b46fd9e31e17a8b8a989899aa814582d710a9f322274ed 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html c1d9448e65b449aa571640575ddcbee723c334c0c8dd111cd062d03de902f5d9 2
@@ -11212 +11212 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html 7bb5e2e9b5cb2768ccb90f2b4126b6af96ad6a35d2b1276fdf15983e6bff5c65 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html 8a767497aacf11c647088af860634dfe08634621821929c890a16d21a43268c6 2
@@ -11215 +11215 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html 59b0c2979643988094d108cf33cedcc6ceb8277cfa2713b9e5fced60061ede8a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html df199bc68a5b69dfec2a84481a06ecde5ec948bc96ef4047f5d5be51aa5843e3 2
@@ -11217 +11217 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html eb724c23ee5da7252f7606f3222b8629f6c6c5280f13c9e9384163ad2fb6c06f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html 05dcccba8dd846678d168350e0649c78537eb87467644a4a179e75e2b6ff6c8c 2
@@ -11221 +11221 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html 29bd41520748384df591a5f2d3472ef73a4f87ad6394ff1207ce8c046d5717be 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html 9a92b6eda2eab01581672df841c4182f134cad88ca8eea2d7bff483b9ec2425d 2
@@ -11224 +11224 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html dfcac28b6a3bcfd419a7887716d956f03068bf798fe30571ea612227681460b8 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html 7dca86fb92611d38624a4d4b80785d98bb7bc10485734b33d9eda30abad137cd 2
@@ -11227 +11227 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html 94e00c620d5e278c4af0c473a8a65fd03d40a238f9f56291ab789449c14d073a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html fef276b7ed48fbee6692292c774ef514c2ffcd7fec6de28cc6551b036dd758ba 2
@@ -11239 +11239 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html 305818afc67e629c7ca1bd046da4ebc20a71e91f3ce79ae31ea79cfb193be7b2 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html b8b4fa284ce9148fb69baf711d3629814c34fba2de59971d4bf44f9029aa8074 2
@@ -11241 +11241 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElementData.html 33d197347a8c9c0c452d1909fa0c91ef7a55a01dd6642eb372fdf11d68c25ec5 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElementData.html ce98171ec27e86572f55597161c85b5bf39a1da541fc524597ec3b978c6e3bd6 2
@@ -11250 +11250 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html fc730e09ae9e215d924738e2eafe33a993f88c108c1b3dae4d3b39c078969d8c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html b04ad67b7b44d631c4604c1408db27f41be3ae35c642c99a90e9826ccc5aa23a 2
@@ -11253 +11253 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html 482e010ece97f54efb194a4b17cb156129b54d91e63a9f0fb9a374e2ade59d28 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html 29c37a0429ecc1390a0445af50944b1709e3112207d7e1cab4ba04a4c64c9f06 2
@@ -11257 +11257 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html 659868230037a249253289b53ecd8b5a680e77b60fbf693d433a699d3ce2e39c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html 5827d43b791fa78909eb288527dca055eb0cd7f711d91c37f11580bc0aedab56 2
@@ -11265 +11265 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html fff3ce07d7bff74a46a7460d8ed950dcac2e523d8b1720792b6c242f56907d10 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html a48bc7a90a65c6df6f2a612156147fc20f0db9054df8533be099048633f08626 2
@@ -11284 +11284 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html 42f51d0e778b9eb27e6dceb222e6b5aea76af732952231fbcd1b7c07e215c214 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html a4afe2842b2587cc37295d5e75d1d8ce024c5f8680c908e9f9d578171be7095c 2
@@ -11338 +11338 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html f1e55d117ba45aaf0e3d79507e0b02a5e64e735627e515ffd33c090d7631fb44 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html 6ed912e503bab36f59d7834e830cbc29a76ddf102576676c2dffe9382306e4ab 2
@@ -11341 +11341 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html 9b23aa4a84e5706299081df6df0d586e61cb2f3b2ea059a75bce21dedfbad2e7 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html 5eb0ffafb5b8a070f0e6b9990ce220dc7e24e7c0551f3780bf1590dbb57fd03d 2
@@ -11350 +11350 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html 9bbab50fcb254e53825c1270f574371d9ad89252125cdf5833d3934a3be70009 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html 86d222c2fb9238072408edae2013de8fd0545d91d5dc685a302b4fae2dcbce9c 2
@@ -11365 +11365 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html 6757f4351ec226203e4ebc52e8027c7c9fdc9a9bb401e56fb5b74402c1df9bf0 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html d68536860beb69f4a2c365d17cda6f6e0b66243b794a1e5214516a830a8902a6 2
@@ -11371 +11371 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html 84120bce6cbd3e8aa9441d256232392ce10ac2729e2bb8fb5c82dc1dbd2445f5 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html 93a9b0dce1609b4687faa9b99df8c4d6398b63e059970a16a32b0dc31340d41a 2
@@ -11377 +11377 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1RayleighKotheVortex.html f4df85c59470dd59d704d07af3643f2dcf2eaf978c12cc7f5f48cde075f0b0f0 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1RayleighKotheVortex.html 80059748baf5fdb55a4c38495194605f6b5b9fb87fdc40f36960fa1ea4e6429d 2
@@ -11380 +11380 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html 3af7f8dcffcde8e641b3219e05a0c8c8a1b1f7f898aaaa1bb1fcaaf90a63026b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html 02f7bea509d180aa489abb9835e4fad62da9a7f311c950301ae4f1773f1b0363 2
@@ -11383 +11383 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html 6218b96ac1126e8b2ea9ff2cb36451e481da9f00c6c72d1385ac4b2a06327286 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html eab29522a19b4f471de644bfcd94231e57d117826e32ca5129c89358160147ea 2
@@ -11386 +11386 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html 949ac175ba9ec8f5f74a8139cc384778ca8f91ce011669a73e6198d8ed125fe3 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html 99521e2ca2425ded8c8459fa6ca391034dcdd0f5f75a9edc5ad1aa451c52753b 2
@@ -11389 +11389 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html d01018a2672612c951ae22ce92698aa1b0fbee701f515ea4dd1ee8a37d206403 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html 95c38b21fc24b7ff8702dc704e12870660e379a155a0b2a53f263cef440655c1 2
@@ -11392 +11392 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html 0a75b253c1edd801bad4908876826beab2ef7e74ae5feebc3e467122c50157b7 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html a97524cf64ce5e647a207984835d620a3493ffbf211ea07da56d2ac5c069cc54 2
@@ -11401 +11401 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Spherical.html ce0a60f2a9ef2d4b9b79112f19f3e7276979bced78a54fec7027493e51c47158 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Spherical.html 003acfaff7880865e7a1483fe2cf0e0fbf03a3037ebb948148e1496c326df55c 2
@@ -11410 +11410 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html b0c6db78513dde22680953aa39cf28aacd9a0539ce030af430c8ac2d29ac4a78 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html 4f7513446771a694d85571bd27ca39345dc4c243eebbea09a42b8757728174e6 2
@@ -11447 +11447 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html cbd5c2c1e0399f441effe44cdb40d2db44972df1cd7532beaea4ce4d0bb9de50 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html b005ae8904c15a11ce6cc36be4480a91746b7ba366f3a7e187aa01fe52935dbe 2
@@ -11485 +11485 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html 43b15a6cdd0869b8d3e5aff468387ea3418097a28b4d1b9453e8a3999b550361 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html c6cf2e8250a36d46d9e4c4013dd94af17afcedd73fdc5df93d24c78d8bb34aaf 2
@@ -11488 +11488 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html 747031ef6a14a5075a269799bf403db4a2ccbc1cf34d2c4ef20f4bd35ea3afdd 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html 84dcefb476f124b2dca98fbac5792bb585343577c2c9288d208bf1d07dd2c65d 2
@@ -11490 +11490 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html 2504b0e639f37a8817bebd4ebff15a9b9bb67775e069c19bdd31acbf6b1d77b7 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html e5a5230712ee576b978cb24547558633a60828011ca1c4d6e2403fc3fd7b7d16 2
@@ -11493 +11493 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html 427ce0f406af95b518fcd4a58a9526610b58c63a3b1e08fb0f9787cb56dcdbaf 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html 30e168e0416417682bfe0e55564a7fd6ab70f7a9b33bc8b9857e1461c402d23e 2
@@ -11506 +11506 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html b485a1aabe64c31900aec4fc66c755a2993cd8c37f2034f00ad53b1a1be5679e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html 3b22859bcca81afbe4d076510afba053583033d40f74ec29ef6c737eb7ad37ec 2
@@ -11548 +11548 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classKellyErrorEstimator.html 46fa01280695d0080dc246de3592f84d6dc5a4b3bf94f2ca713e00333b8fd2ce 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classKellyErrorEstimator.html 81c3575e4d3fcf972041efcd40481bfac40e9c68145ea2a69c1699ef591fbc03 2
@@ -11557 +11557 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html d1b4e3289ab277c52a26c7ef262762ef4ec877198d3e4abc643cb1808a5fb98f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html b30dc520a30e1188075191fa8070c0748d3231d56e1e0e9000af9151063c2044 2
@@ -11564 +11564 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html 6a067bcbe829c65c51df489b4a2693992cadffe69ad3e820dd13c17bc8535b3b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html b3a4482707f977903dd75670bddebc6f9dde312e309fb7fa6d497739728da8a2 2
@@ -11574 +11574 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html 2616e96eb069545177e5da7e62ff9ec36bccd9d20ed41db3a431ee777673a0e5 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html b564204d01e03fcf8525aac011544bc6b35cfc7d24e51d1e5e39be38b74ec6c5 2
@@ -11579 +11579 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockSparseMatrix.html 766c061e0a6b4c2f5b8b5c77ccdcfce2741ec55f2ca551c8a634321e3c4a90c9 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockSparseMatrix.html f7525b39c6485bf32a35e1b2a46c01e8f6de4eb54682a555572dd39cab093df7 2
@@ -11582 +11582 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockVector.html fde9bf01af7ab5d33ce0d32e2e7341ebe2a9813c21c97a24259edca954234b0a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockVector.html b25f2744241224c306f9e28d3d637f97588452e863e116ce81ab77cdf128ea1e 2
@@ -11600 +11600 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparseMatrix.html 81e0037ef3df5dda5d84eee814896e7a9a10f60398451a9153d596f3bd446910 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparseMatrix.html dd092fb9589c98ee0e5532ab9be6965fe76ba3d668b609df0c22e511432a2656 2
@@ -11619 +11619 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparsityPattern.html 261b59fab6ead83241044dab6ba264f440d6550286499d898df9f6ccb1a7eeb6 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparsityPattern.html ddcbd3ba41c921897253466cecfb05326bb50f1aebc3df12b6de6549a761a196 2
@@ -11633 +11633 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 60d19d7da39d902440f37bd489268b84470d93b1f1889c61866903140304cfdc 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 5b3f276ad360991839da07ea658fec737ba5aecc92a18faf67a49f90a4cee3b9 2
@@ -11636 +11636 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html ef563cac132e151bf6b1a6a55f7a66013c40fbc11c66b68300735685376decb1 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html 7d9df2c9bcdf109f7a27ae37f1c455f46ff9194d2feeb7c7ece2ef2afa81e304 2
@@ -11642 +11642 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html 280ed5325adfd48680df32fbb84fb4e45b46df7125a43ca2bab53f06fc3425fc 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html 7833fda8bec38f6a4cf90ebc3f48897b01bdfc509248a29db7bc550e5fd1f9d0 2
@@ -11750 +11750 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html 8c94a49fc614971c2553ab6bc4c16f6e5f806d68528f15c099faeff4d11c6107 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html d536418a44606151f6c7e026e914646debfc37b20427c9e25f182b30a486b4ae 2
@@ -11753 +11753 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html b439ff101af4407eb3701d1f6fe157eb6453df87895237af16e7b6d9e6bddba4 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html 71620822fece4a716c7dfbe9abcea04c59b8f996928e7b5c7be9949c161c70a9 2
@@ -11755 +11755 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingC1.html fd0c791964cb6c8bcb7ef59778d19f24840bbdf9ea0de64f4b865c6d81ce25fa 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingC1.html 13ed3fcab8581fbf20eb26caaaf4847e1fed17dbf116ce759ccc5b5b240f77fc 2
@@ -11758 +11758 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingCartesian.html fb2513e842446ce3972c56c25812a9ac3f86b72a72eada7be22bd5ba87cbce4c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingCartesian.html 8384a7b70e603a458be8db2f47f94e6c5377b745128c5085f26bf264d5878ab1 2
@@ -11764 +11764 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE.html 21601120d59a53d7078f813b841f709ad42965b83c02efc2148741de85dd1ebb 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE.html 7d0d094f43814f3f9ba888ac2094b6160389cef3aaa53b2653248c21bcd46b06 2
@@ -11766 +11766 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField.html a76a59d248bee73dbed9525159eab4af3aa1863db5750ef7a93f348681cbea12 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField.html a8fb5d0e294e1fab77b3246f7e4d491db97c31cc9f31436323d7eaaffac456f6 2
@@ -11776 +11776 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html 0af8ee95044316c7facaf40ad1651b9c8d527b1c64141af035dee13df3b447e7 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html f818e648935eef254c6d0c3ab57241d7a2d826ffaa04cf4a4eb98e7db2021bc5 2
@@ -11783 +11783 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html acd6a0dda26e97068f41a9f529b35cdc80d46bcaa656e456793b43a6c76214ee 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html dd9de5e4cf60aead2a6f92f2c9904b1b94288ac390e0ef3a28fb5a9a379b6e9f 2
@@ -11785 +11785 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html f1b0d12cd4573a41810bb41fa0cf80edb6bd28b10223648400d0004c5df84a0c 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html 44e28e71ff4d063b97d0aa577585bdda6dc86969b6f7761b7042f7cc6037d4d0 2
@@ -11787 +11787 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html fe57c58622a60a1971b4c31b274f6b8d54eb57d1d2bdf114d76a9e49abef69e5 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html 61f3650bae15e305ac393a3653bc2653919b8ffc98842b42ed352323710446e5 2
@@ -11791 +11791 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html 7bcfbca9f85ff36a16c3a4a6d17950918388032f0b41d121702ff1e60bc3620b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html d06e96be3674771a6df5874a7c8d665066627afd577408b51ad7ab5ee66cd8ca 2
@@ -11794 +11794 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html fac40f2bbf22606de132300a68e76f4566b46ce8c730d9263d6b3e66db743d3a 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html 2dc71b50b59890e0d387f907e8020af89f6246c7428a17e518a2a48417bc7682 2
@@ -11800 +11800 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html b7a6e2e8a067e9ac8f13e14dbe28bcf3ebc3a9769a564933a10d9877225fe553 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html 17cf4822101bfefde539d10171b6682962f1bb8c8d7bf79202aab4a180b55587 2
@@ -11870 +11870 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMeshWorker_1_1Assembler_1_1ResidualLocalBlocksToGlobalBlocks.html cd7e60cdb42d13e4ae318c495ebc1132bb9928f6ebbcf8eb9a6125f5742e4ef3 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMeshWorker_1_1Assembler_1_1ResidualLocalBlocksToGlobalBlocks.html 57d88eca8096f85ffa517f67c1b2d8cf4c3fbe1a5bf3647bb30cd545ee1665ee 2
@@ -11880 +11880 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMeshWorker_1_1DoFInfoBox.html 783416fae1a67f151f77674a031c932e0642cae6e426721763c642a3484b9347 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMeshWorker_1_1DoFInfoBox.html 8c5fbc2a1bb34e44b27c725fef65818dc277e8adcceb1f16b498a739cb4692f5 2
@@ -11914 +11914 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classMultipleParameterLoop.html a90c9e28d94b12b95d61b0dd8a863d15db9ba57f6618a484d78387c5378e798e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classMultipleParameterLoop.html f93445092a8af6f6dc0a60ce99db29895fa92b010188faf1f555188d7791f600 2
@@ -11925 +11925 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html 28ba3d8d1574a424788a71832523464308fbaada58410fb3ac4f1e23a3afcc68 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html bf630d8cc8851a7295020ab8d448a09e4755800184c598eea095f91b2909930a 2
@@ -11928 +11928 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html 9903b045bb263441a9ee6f6a2ad15bb69cffe8d61870bd95db4cde08521023a3 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html bfc942a0c2f8d7a9905bf4e5edd46299e0a7f9c1c697b31ed99c9f4847f97a73 2
@@ -11931 +11931 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html 846b5ea5a0a78638f907078e3266f834969c19880e90e3e43e7c9dd10f5d6457 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html 5691af9502b9c3224fe7a2840f6d7025b37eff07405275b58af6b1b2ca5c489e 2
@@ -11934 +11934 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html 9501844595d7e57fc96adb1735368316ca9eed33fad2f988a72581941fe09543 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html f6aca76b79c490f7485722957fb88c517a0b2ef70d67d48f763470f693c5e939 2
@@ -11936 +11936 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html 506a7645dfcb53fad941b3cd63cff8e5d8a2cb9a22a73700a0516e59ee09a038 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html 4ea8245cc570e7d6f8cd7e2a9c84641948014b696449aa51cf9c80faa42fe449 2
@@ -11938 +11938 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html 5fb2259831b004fe3a15ef6584e92f5df26089e966fab6dc0a207d43bc5f88f5 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html b36bc42ef1b822d1fffd743fdb056013c67c3c6d712d45e59a07ef0fcc0b0dc8 2
@@ -11940 +11940 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html a8b5b64a00705ffed843ff9cc8fc7376ae576b24091c60f2bd5cf8a9eeb1d5b5 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html f5cbd2a269065265199c937f6c5932514e74232e4e045efd6ff945b31af680e2 2
@@ -11943 +11943 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html ec8f6436e319ca1ed0881e31cab6117d2ed1c48bcd90d8a978e255996bbf17cc 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html 3fb7d83c45d43b21cc18a8f04424beaec4db54373dda47afa3ad3f77e9db0865 2
@@ -11952 +11952 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html 6fb9ad008543c5093267fff941bddfcbbe7c5bacd485bab39c41dc88c4ed380f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html a4466c93241e857265c22e072d045d7cac6f11819997325eb8824a3409597b55 2
@@ -11960 +11960 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html 855ff146a865edb9f729ff3b8eab763e1d82784d210896521da73bcae1028fb4 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html 28d04d5645d7f65f69bbabe8f066bb07c5a94b95cfe9a140523c3a3d2dc25f18 2
@@ -11975 +11975 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 5acbb1df2209424f6407ab6d133f5f95c8a2200e20f9e8c6e10d29fff06db2a3 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html c9d9732b43001398fc23582b91e467beded38a797a952dc844786c1a864ae24f 2
@@ -11980 +11980 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html 427a96562eb5c4294a71440738624dabee9f308f0fc0f5ae331335ac4fac5855 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html cf9cdecbd9b29579bf5fc08d1d840c4458c99d5eec4e49c95159bced0612d831 2
@@ -11984 +11984 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html ee3aad585bbde212b051ee205aaaa373a49f4a910af5ca3364ee0266823f3d9d 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html 39153b5b166bccabe59b1e85ee0b706fda0140c7de0307de9af330462eb274e4 2
@@ -11986 +11986 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html 9a243afef54eb791b61ce8820b0378f75b398d7e6e6c5de8b9c05c24f088ed51 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html 345431871c133492d7369be471b1eb7dcb65f8dbcb20745560b042a42b7a8b6b 2
@@ -11988 +11988 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html 935c8280ab2222c490c6b87c8632450456e7c15564b3d942411632fe662bb492 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html 267b6cce23f1b82b6ea094036e859536e2b5d6b6a986422ebc916e35af4cb646 2
@@ -11990 +11990 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html d6e4a8e2ba90cd3afc64f20add30e5aaabd99d05aabd0784d6e61ab3aba12898 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html d80a56882b1399989a9da061553f1d9b8fdbc3bcaa39c83a06ffb11f1f6fce6b 2
@@ -11992 +11992 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html c8667cd8145702e1fa6c7d201e1e96b30cf3db20ef9742d95ce6465f3c9d5974 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html b54b53c54725154a21296929b718ae740aef4b3d4d4b62290f169c01bab80307 2
@@ -11994 +11994 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 14ae0de095b3b1637b8a34c7fbd4549e4ee6b590c4f470d2e5e9c9f0f24e7bf8 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 559f688073ef6e426deaa232c60541ef44cac3dea4baaeb01bfc50dac34825cd 2
@@ -11997 +11997 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html 01f26c5584e0f499292b4aae2dbce2abd4be46a34e329359be8ec33ac7633e35 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html 7c68f42e2c1723f228e0b8c0a0fef2a4dfe8a1e43b33c9e1f9b2a89907059d21 2
@@ -12000 +12000 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html 6ffd4282e13ab1ee9e85d9c09e48447ef561a92764695bc66b65308a0d2c22eb 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html 2ec32b65f1cca580639048890f5f8d4fb835c104e72caabf29a3baadd6a0213c 2
@@ -12003 +12003 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html 5b7a7421a98f550fa3330335a74303cba366d55cfdffafb07e3e0ce03246303f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html b4a47c74ab34875ce6233f3288e9aaba36fb4bb05f913a569c091e2ae38277a4 2
@@ -12006 +12006 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 30c16b0fc5831d0d4d0b804d15293ce206f0fcc6ef680b54a1d92978858bbcad 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 5c0a44c90711242d0da2c32c24ce257dcda577fb94000514928da953c3e0feac 2
@@ -12009 +12009 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html 996a74e5765458ca91ad4d4fded5ac399e98f306da5b86e2ab767b96c5784d7f 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html 8c35879871deb5ff3ab5f223e1facf8b23ec600f09e57b827e506457a49014fe 2
@@ -12012 +12012 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html b1859a2f2d287597fc986a2c8c614de19c2cc0f14834c9f4583ba067fb5b67ff 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html 35b9ab5c90458d54ccd3b06cc5bc0d39174d47155b9435979787fca394bbce84 2
@@ -12015 +12015 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html 049c28c48827a571063629fc744d084365dcd1436da29f2e5cf9d344e725ca81 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html 19d6acb9bee48874ebbd344290372848bfd4ded1e695051143ab1157ae8934d9 2
@@ -12018 +12018 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html e8c1a396d9469c04c728d92b32a7d5abcf5af576bb3ab3a91f9ce22c6e4cc39b 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html d28421c056c32304c08f3f27d446d9141e8f893844ae9935ebcd01e360e25d59 2
@@ -12021 +12021 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html 0d6a21be987049cd59fe8e443fef264fcb273148201290f080a7651b96550f07 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html bac57c8021c392dd96cf9563b3e1d2ce0fb2db5c3ad0655f8243c8243cdee1ce 2
@@ -12024 +12024 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html 52d915dd277f26feceba5f14878597da2d9034eb9638bbf1e33c23be7cc19b71 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html 39c2d088c56261dfe894ebacc9e1743ff80be745bfddc346a5faecaaba0a9ae8 2
@@ -12027 +12027 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html 1d685f23580eb3a444b2407c60aac8a932469e052edcced4a1f0d655b638a2ac 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html a3b93748e4c8e1bd505757247195527fb9b79bcb0d8f43f5028d3dd56d7b9c16 2
@@ -12030 +12030 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html e61b84b5e2a7d233d2dd4b21093af41be04a392eef041a6276667d9f2ec70286 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 6db373338045cb69ffe4073fa3b0666e4e811c48c89926b8c78f1840146fa275 2
@@ -12033 +12033 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html fc0a37ad6eb0c31c639430ea4502bc24823654decabc3a428235bea42b980d0e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 66d13565d0948aaa04846e3177c62a95455e723259b9a5b319792088ff9e22ae 2
@@ -12040 +12040 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html 7975f99af8deefc193f5ddeb1aefeebbe38f2fae98f133212d5d566c7cf4b263 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html f630b15dfe60b4c3005ff50fa8b87ea2a1485ff2ee87a5066222614337752626 2
@@ -12125 +12125 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseDirectMUMPS.html a86728d4ea715ffb6c4cd1cdea45f5258217a7fd5cb6aeb160496eb800d0f7d0 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseDirectMUMPS.html 9a470a2cb2ef03f9b2cac14cb16491f330861844313ca10b7bd5ab4f0de6793c 2
@@ -12128 +12128 @@
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html 8b8aec1305638d27ad73280dbc5e55f6b2101ce8e043a409d13d8a812dc5fc3e 2
+/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html
cc3d64181ca23eb6b7fe0df30083d054ba4141a708b2c76b0089bc92e594fce2 2 @@ -12131 +12131 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html 6a876d1a97659ea0adf28b8cc43e5b1e2ee74e6931baee028c3595fc9233ab18 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html a513a180f02e5d78b44b14d068758211781e8cac241519014620cb99b7a7f9c9 2 @@ -12135 +12135 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html 84d029fecdaa546f07a27cb93e235b027c44ba3db3765c1ef8c593c71b56c59d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html 1627950812042bcab744e29fe74cfe617f02f5e096aebaf0593d0a63722e9165 2 @@ -12146 +12146 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classParameterHandler.html 78865c561c4d9ceda547b9e94f1e441fc9719a1c652326a48756d4e8dd405bba 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classParameterHandler.html 85d20446dcdc9a3c7157002a58f53fe6758c11ab12153457478a2e453874c2f6 2 @@ -12159 +12159 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html 0f4289aef38f35e8a570765c5c8deaf1b850600f4f52ad08510ddfe40f4c1e3b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html 7265df9f68e402317bc589148c12a7ee5f7d7ac8bd58a68b6332930a1f7803e9 2 @@ -12164 +12164 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html 7579dade54414cefc3037eb8bef05fe92682ea28748c54a6f23bc3cbccefd51e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html 3ce2c29d823300a710ab3d40bd44f30ce753867af4cf9191a502510e08184de1 2 @@ -12167 +12167 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPathSearch.html e501b8f31bc3a0618223f6d543e6b5c7a23e275fb6961d8ab1b107ff257a3533 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPathSearch.html 4fa074dbd835845172f2f50aa5e7c1b843e44cf06a698950575934b56951ee4e 2 @@ -12205 +12205 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html 88fa68a10e623bb4a14dacb1518e0fd23feb3fcb58c94a7819433d4f0cbff166 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html a92782dbedadb589abce8fcfc398116840e438e8f9df839367a8901ac28c6aa6 2 @@ -12210 +12210 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html 3e3f78c8157a3dc13a25a64b77387d134dae0595902494281767640bd786829b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html 9020606457d45b63760eee7e3ae1e27f3b18b5c01a427b9ea9be651e60eafa0a 2 @@ -12215 +12215 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html 328b83123755c06ece41e9c05d2d4f572afd25e2863cb0c246d3192091637ef1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html 0481dcbae2a7d03f1ce71f896a50f92c15ed6bf3976d542c086bccbc5faf4f5e 2 @@ -12230 +12230 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html b0ed0158bb16289bf2dcfa98d03c7051e069eacc247f5e5a99c6551e77fbadfd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html 8a6ad59e035d5cf7af9f690091a14e38f8a57fa2c44b7a2da6617a40892fb3a0 2 @@ -12233 +12233 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html c1e2f3abdf7b44ee3d76634dcaf190e3bc57a76ff8be208247869232852d6017 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html 01e35aac1cb779812f09f4bc7ccf431315b87a183f9c23e99a3c30db108bb729 2 @@ -12251 +12251 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html 6fb3fcefb10d35efa3f3de0118baf82944097869bfdc8af4fc3a985ab3607556 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html dd5a64b59c211095fc7bcdc9ba889c8ce13f9e84d6a4f77637d6a9fa6e636c0c 2 @@ -12254 +12254 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 0117ceee2a4d5a17f0f2f7f3507b2d138e1e587753d465e236d2aa66ae56e0a6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 4d84c0086cab59716c67eaebd1910f2cd2bc8c1f67dff7b04f2d65dd141c32f0 2 @@ -12257 +12257 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html f2f0bb9de24c73fede78826182ac47675cf065352d94ccdad064a8af399d5cdd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html cafe4d2d1a328d78286d8f6cca399dfbc935c8118587cf33037307a2a6a70c63 2 @@ -12260 +12260 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html 53e3277f8432c1166f835f2a6399a04b3b384a996f98f46a3e07d9e1703f40df 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html 097a7d3358192fc7754d2b325aea97cfb276aa76b858ed1eb15db6c54b28b6e6 2 @@ -12263 +12263 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html d69ecaf2f36635188bcf3e83d87a809766577c7dc58fa1fa8417ab0e4cf4cc9d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html 9285cb78b7a89c798393e54a627f62f2081af1225b021b90c135e8027ea66f9c 2 @@ -12266 +12266 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html 061fc88337c07b1aae4acb430d78974bddc79292f607e919e5d4e5f711df462f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html 5e6d9d633a2c757cf4f04c686fb50d7ed242a58a6aaa26e681ada1c7d9c84657 2 @@ -12269 +12269 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html b642dfd2ec3f7ed6919b7107e75891dda602f838975435ba3606f1b1760be995 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html f620a118d5ee5960f9766d47fc9dc6b87108f859dd3ca40736b19fada777d287 2 @@ -12275 +12275 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html 76d810617d356dd29f6aef36b07f1d7e2593962322e8a0bb0ddc92808fb5741a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html 65ae73a678d8749811e45bb32da40fff98d8c36b6f2fdafbbcd58fec7238def6 2 @@ -12278 +12278 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html 79944776a1e0df70175cb39389d95104bbf024bc5d775d8f6ffafbb5b8e20779 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html bb14a2296a8d0906b66e0e43833d0d2533bafb6bcd426748cf9c081ca11d2a75 2 @@ -12327 +12327 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRelaxation.html 1fa0e8505a7cbfa227f96767ba847058325fa68a717e0485f592a5dde0c773f5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRelaxation.html 7d48737201146875dfa208812794c3181894e23054fca805bcefce05e405a7b2 2 @@ -12356 +12356 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQDuffy.html 3c7ca542112ba071e0feb5aea37413835aafb749abfc0f2d78b658506fcd15fc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQDuffy.html ddfc40ac20610771540c50a9f0231608917f2c6c71b60f92b06ab9db514e8cfd 2 @@ 
-12361 +12361 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html b61aae10fb61a7fa61abaae28362a896dcfca532018abec4feb5a2dcc0862ed2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html fd5425005ddec46d7aa1d80b7acc2bcea8db8165bec0c3f86a80a5065aa418c1 2 @@ -12364 +12364 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html 08e26536844944f22163abe933066d8d4b94b6b090883d0695dcc6d70410ddd2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html 11cca9060f9325c49e6eb338036a359cc7e9ca699523162f268a0a1065d424d6 2 @@ -12366 +12366 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html d701ea4cc2963f56189dfe8c9086efc2d95e1cc976553fe4acbcadbcbed70911 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html 39b249f587317c434f58fdb551baca353faed73712e909d61fdab194816967b2 2 @@ -12370 +12370 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html 09541a979fc8b9f8e450435692d233f01d61d817dc2a06984e3377919817ba16 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html f96b459fa34a8f0ff87c7eea9778e15f592ebe19f2ce11c86d8d3817600954c2 2 @@ -12372 +12372 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html 518b43e8365d232ee978e925d52d5394591dff9f710021645dba8d8b0de7e8d0 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html f1a830570e2f82de45d33b8099c6bcf565998161bbb0a7426af897d719e38819 2 @@ -12376 +12376 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html 0f558ab6989cd804521de07c4d18aa082e945851da329fecab86be826aef0449 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html c702a08528b15f37dbc5258b8567b0cfc658492c097972bc28e069800b53a666 2 @@ -12384 +12384 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html 46aa0b38ea18c7898d9946fc46988ba90de9bc7942ee39141fa4626b08d84993 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html 47f0858e4766c12e93e0f462eb58e8e75a890767496604742155b959c44be087 2 @@ -12388 +12388 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussSimplex.html 76c36e327ef66ac0d23081659baccef92b3073f9ac2307693773a8f3078a16d3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussSimplex.html b5647650070a4051abc0fa57719ebeeaa71704c34f032208db6628543da42463 2 @@ -12411 +12411 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html ebf1d21659de0cc6ed6fb96e3176928f8a8b4d9efb760f3b07dbbe8e92f45c30 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html 49b1b2c5c29d3c41a6095aae7152c49b1462b3a3ca32059e56048e75df64ded9 2 @@ -12414 +12414 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQSimplex.html c5cc95d6e309feb21bd25c5b7664f556604d73ce9216d5a2e6378bfb0d23167b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQSimplex.html da345a205c82ba00759c07baf9773f65d0dc25fa782dca37ebe3ae1c07616131 2 @@ -12426 +12426 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html 4d99ff1eec4d2a657e3c6b7701dfa9fcba8c5f7561275e29d54f06510e7ed10c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html 63e59cb33d5cc7b4d3cbe63a109c5d29f5bc3df7f11be53ac7b54a68b81f0936 2 @@ -12432 +12432 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQTrianglePolar.html 5c769e5bb17af83f88440a83c97a6b2004036643a03c33655fff996cf1d8127e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQTrianglePolar.html 59dae45605ab95e465869d836af4e6a109cd63bc99a0dfc723ab80e624ce27c7 2 
@@ -12438 +12438 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html b71a846f7ef325ce872cdae8451f7e784b1f9b2c92200f28f5f5ec9ac1f0486d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html 20523673758099efdd0cb7e9809254f3247a6eec1aaa7e4f2bc4206ca23e419b 2 @@ -12441 +12441 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classQuadrature.html feb25386929a34d07de23b06b2302445c4df7d340796d62bab6ba88bf372c0df 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classQuadrature.html eb820902a7ca566ba160596f9db40895b5c595003151dfdf3ac5a6786fba55ba 2 @@ -12457 +12457 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classReferenceCell.html cc14cd054072870e3956596084aa5ff5be2abfa3b40911358ebd301ebdf86d00 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classReferenceCell.html f2ec811e63a1ca5fb610a19cfe0e6ec7b1ca6ed46f9d063956e6b0315602b404 2 @@ -12492 +12492 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classRol_1_1VectorAdaptor.html bff345832919c88c28af000b6008ea8a1b02f9e12d9b1e16dd21107f1c441b8d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classRol_1_1VectorAdaptor.html 5073e28d4dbd3b6b4c05d934f747fd73c8cd5b5304c4048100d1069cf13c35a5 2 @@ -12536 +12536 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html fd08cf12db8b7d637b536383476a0cd4f40b365b1e5f8bd79b74d3af38e91f59 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html 90b37e4e5cbf6e9a3f728c8ab2fdc0cadf90822982e7b997c41db17a5b10a3c3 2 @@ -12540 +12540 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html a8211c46184e90cdd96e3aebf01d2caed32f5456197551d523b011c86876eda3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html 4748688c48b0555d59e8da635079afa0483c58181cd6d5f09b1a38a731723f4c 2 @@ -12542 +12542 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html 3f148645ef676358f4bba87b19fd266a081c1200a0ef5797ef81fdfb2414ef53 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html 43219d9f7074448075a9cd82bcfec41576fc3ebba1e4ffd5af124670c5bbfdc3 2 @@ -12544 +12544 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 811055d917e39af521f31db6a97c3f9070c2e2924949e4e73cccb6f5565651ea 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html dab20f25e80217528be3d54cb9ae42a4094b95ed433075f0610b217d4f9c8b95 2 @@ -12546 +12546 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL_1_1AdditionalData.html 1bd2a72156ec011611a58001a6098356fb2c07740e9716486dc75aa3059322cd 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL_1_1AdditionalData.html 97ad3297222d759cfe54f4368755c63ca6494b1bcb4f0514675ed8d59ac30593 2 @@ -12557 +12557 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html 513b705260efb813a3c6698c25439546941ed72f936ce45a2212d95fdde6fb05 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html 6c35c4a8af7bc2d3dd65f824c691e66115bd55ea4ab116b2992cd7207f216cbf 2 @@ -12560 +12560 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 86edfdf52512a7692d5f0095089d2168464d8c47e1093edf7c28c400a2a27663 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 92e31e32275158a85ee91c8f778469a21e7f4cc0d80bfd3d31ca1e211995ec34 2 @@ -12578 +12578 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html 
a6b86eca43022afd428f08e9740be25fcbc12b8ba19106977da169017bd21030 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html 9891cd0447f84539a9e29c5a0633b5d1468a3bb15e820f0381f1de33ad119f55 2 @@ -12587 +12587 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverCG.html 8d3ad360fc07c2e96d0822731a53dd25a78a20ce082d7e7b7d23d48a4422c651 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverCG.html 1a23e567fe129357366637ca18124e3483bbf73f0a6076a9cab7cd519d0a71fa 2 @@ -12599 +12599 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html 67e9aeca558924fef4fda19944d1b6465a84bf5c8491b9a5c1e341e967eb8db2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html 7ebf36b15a45c44b96cd0a7333da16d1df90fc0340284b7622271bff039b45ac 2 @@ -12602 +12602 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFlexibleCG.html 42224e3cbfcdf3f91c0b80f9c282eccdbc6f27cc3f3ae580352c6b10610c8b00 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFlexibleCG.html 2a50ff24648f92d80376b892a8d77ab82f365d195c223aa37c731b42e9bbd70f 2 @@ -12620 +12620 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html b994e96d4e2c86fabe75d32ed5f58e86a55f2a65c9e6f896f81e60f0052297bb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html 6be560ad9827738431146c4049e7989836cbfbab1bf8bb607d5672bc6b12f169 2 @@ -12632 +12632 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html a27271d772c6ce2ccb31a08c40d270380026d9358fccb48a0eb9f95d339133d2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html 0c3dbc122a96361208d4fe6500735b4b94658aaf18e232138c45e3ebedd780a2 2 @@ -12636 +12636 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html ea63ad060ac3dc7f37d88f48f7640bbf282fe98825623b04de83863ee32292a8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html 5b38d2129577a165144228ee4fafeabe637f4804146b9e96a1916b4ab95608a9 2 @@ -12639 +12639 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html 6b75be9e142b184fc58752518c2556139c939bdd0890d15b5a5e1b0734291aa1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html 3fec2afc35468f60304e4a3e65e3d968a26fa57eff58f87e06663a0c7f64baae 2 @@ -12644 +12644 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html fb1f033cd6df0e7512880b1282446b27ac5526bba0ebd883e4ca4e7b494dae18 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html 0382ba6f8598b4175295389f492f349bac834079aed39a3c79481a5d923aec51 2 @@ -12647 +12647 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html a72adfce394f2847ebd7b8320427effc9af1681dbe135e6172cf8380bbe8b68c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html 103f150fdd8f3c45f065e6786c3e27a0bf31ecd1067981b069db62cb46561e62 2 @@ -12649 +12649 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html 5f6d0334c0356abfe55fee4949312052988110b34a6abcd2e78bab62a354cd2a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html 21497771eae9fe3805a19c2da662ff12fbc65cb949f8d3ab7288e5f7f36d3516 2 @@ -12667 +12667 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html 587c89442fb1e1365f9abd4308223a23d84223c9f292a66c162ae4837b42f99f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html 792f57ddc001cfcb9278fc316f9af6b7a06eeeecdff72c45cc64fff6335cb172 2 @@ -12675 +12675 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html be23df1dc748ec60081c9daa79f64d46c97967c72017482799f1f1b6a163a5e2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html 089b222369b8a41ac2ab31eec40244556eb9e2b62330a2675c6e1e3470f495e8 2 @@ -12683 +12683 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 063f8938a2eff09e400a16efe7c94cf96cfaffaf0ba87466c9f33aa5a89fdefb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 5b7eb1c9a255accf60828f0e9d47e95d074fbd94ad9df1e9ee5e6e3be797efa4 2 @@ -12687 +12687 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html 00f7ee9f39046b9c253b08039da08866f1ccc7a0e49dcc1b8e58e6c8c10224db 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html d429ffec4a1ffc291e9eb8e2c423cbee4be080fb9da6f3675dcec601301e500f 2 @@ -12693 +12693 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classStridedArrayView.html 026083006daf9e051680e096e15f730e70c2e90c5cda625a1401e82f382591ac 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classStridedArrayView.html 7381d51b6f0e6b7de4a33f4d0fb91519e3204fcb760b99d63b72ce9d40602406 2 @@ -12697 +12697 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html f9bf8fb93661879a78d2ef72a841d8793fd664c031e9a66febd1d7d201d11752 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html 34742ba790e8975f67a0d20f4bb885872579fec774de7b72fc422b4f4e50ee4e 2 @@ -12701 +12701 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html 3c2386dcbde0fd92e008e85a188e6bd6451dad86ea8ba56efda73f3f3aa121d2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html 59a8e977dd33dc5357cc02858a79b9728ffe48fef42e6b74b6a7a33eaf512e40 2 @@ -12704 +12704 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTableHandler.html a4b4e34e709f47e3eef32a7f59d26cabd8d5cfb8e02d5268861222734fc90d01 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTableHandler.html be41870d76a7c3e456d427e73c84ed9e892d276ed4cc52234ee532bb19527f4a 2 @@ -12733 +12733 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTensor.html 73633028cd417f1d9c320b2b3d72b3f0eb925a8d504f9b4a051155efe7b56b91 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTensor.html af92caae0211f2f8593a9033ef561489357bd9df51b67d48523b2d7b4ad651ea 2 @@ -12772 +12772 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html 8d1b677aaf9b01034c9cdbc0c98ec7cef9d12d6e66815e92279ca15bd2275d1c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html 5b5e4a2fdb7e8d580d0cc14d9063ba68131451f063090fbfc1e361226ba16b69 2 @@ -12775 +12775 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html fbc9ef645371e5f34c235526d96d7cc0a07bf5177b2cfdfb7b77cf87e97b36fa 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html 2794cf06e8757367d120b447e9174a889944b92e5b5fda6db75559966dbe86be 2 @@ -12781 +12781 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductPolynomialsBubbles.html 8c573277a6fef130e87565845a3f1c643d7d27d3bf6661e00898ed79dbfb0412 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductPolynomialsBubbles.html 97ceb90cf81d8a4f15594e2ddecbe21bbb96db7c69b3c26b817248216a5c9178 2 @@ -12844 +12844 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html 932f3349a1ce967410fecc135f75e8139a219248334317767b6355711f3fc725 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html 710e851559460dce6e20e6de1fc5c5941609c1c7807f36bc554387213ac18ac8 2 @@ -12849 +12849 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html 39fdb89904501af813c57bff46cedc6af9ab4e5950dd7a792238b25f27a9811c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html bcfc23e124bacdde92b0208f0069b74a1d44d85281196bb804a6e29014326339 2 @@ -12855 +12855 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html ec13e631f3b1b4b53ca0934ab6ac3d12807073fae1058d4b4bf8693db9a5a248 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html ee8937d945aff4f421b67f54762e6745b4a09f53c7a1755845e0fe994550cadc 2 @@ -12860 +12860 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html f8fbb0a2770dbdbd87f49418c982ca091313e5e07223b1a784a69ee78ce0838a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html d09ee518b512e55b6d9a71694d28b8e679243a7ad57a8bd369a9c4bc695a2b09 2 @@ -12863 +12863 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html c7c7c3e8ad4907000d103676876ddc238168bbe659d633b00cfd98018c9d056d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html 18e6de64bd14a667bc7f310f60833e81efc92a3984d188afa843f42b94e36cff 2 @@ -12875 +12875 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html 7ba8dd2bf99e13211a990010522d278fc34195b95f81e35933eff2954c0aeba8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html 6b254a45e55051a597d52553dc7ef6d54468c557601b25bbb52d2fa1b472f32d 2 @@ -12880 +12880 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html 30f69b1d6ea33d9a0f6773747d25f768ae42369e8584c3368fdb9fd80cc95818 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html 513a7c2c60b1d7c4a1dd8862641700a07806949ba55aaeb81749164c30f3036f 2 @@ -12886 +12886 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html c3d3b58f19c9af9edb3f7c31e18467258b162209ef2b224e46f37bbe7ed58ea2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html ea0795b11fa63b8660c4202342a68fea4e432b82f1eca6a4d5485750618d55d8 2 @@ -12889 +12889 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html 95c3ef59d3d73453469923f5bad48637bb608534045ba47b9fb5ce63c3fca908 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html 1536b76444ac9227ced7deecd261bf1d63112be5115fab2f7398ff76f6db3863 2 @@ -12892 +12892 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html 43f690d0c4f9cb8b577bcf9cfb2ccaaba0bcb5cd9f2a558c3624a4e07c1f1861 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html af2e23221649f3911321268fede94139e5f6efa25f74b83fb3c48bed93d22755 2 @@ -12961 +12961 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html a64422736f89a70f54c3bb1e3d1a07daa91072e89c2e31bd1f921cf70ffaecac 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html f7ae873735b94766240f101171ca8e8ff83dba46ce0afc43393cbf4d9ecf30fc 2 @@ -12980 +12980 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html be605e6e7e75a03d34a00fb76df5dfc1fa954838c66192bea685cc0addaa8b2f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html cce33522f4ecd2336d303d087a26491934d21163b68e8867656395051e5cd5d0 2 @@ -13029 +13029 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html 3db175c2b6968c13385291b9a18378d15f274f0d6a20d714e33a1fd60fd0bd3f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html d6a254c543e80b4a83423734878d8656dfb76abb22ee9c05d15426daf638df5c 2 @@ -13032 +13032 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html 9862dc07d7698a176895b0aacab834af6b2cec905660ebf4ad93504b9eebe71c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html 9f16d12091139159b327265034419d9fd048fd0fccfb31bf4d8c15e93651c57a 2 @@ -13034 +13034 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1RemotePointEvaluation.html d3ba4e2c05f51e866f4c3cfdaefed24088efee884831c3f93c1dfa183f690672 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1RemotePointEvaluation.html 8d780586eb72db40233e6fea344d20d664b71a3a17dff3ee44873357cd4b0c6d 2 @@ -13046 +13046 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html 57647b13d8d74f25f4d0d61559a12dda3caf72de668bef41558c6aead8b0e42d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html 491c709a87061c61ebbdd88fe5eb5bc3144e3bd69321c8138b17b8737f50a5fd 2 @@ -13110 +13110 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html 820b04b489253c84f8438ccb0706a5552bd94911c9d360e18a08568ec416223a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html d07670be120deb3d3f9e3466b42c9c3efd60b11e52ff2cc817c7cad4c72f5fef 2 @@ -13237 +13237 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html 0a75683fae9832b895de7eb605f57889958c1231516883c6f7d2b014d6d01221 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html 16775b7891e73acaa802f87b85ae21a713e5b0c8052f5d312d770db6a9012c16 2 @@ -13317 +13317 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html 939c1aedbe2e43dc49343b53bca04b8e054ffa17dc53377a8dc6cfb97a4ba397 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html 151211a082a6103b6ca912db77af8664a343f198ea851cef64e821dbbe54bfc4 2 @@ -13320 +13320 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html 16888ea912a69845bf838f49861b1e517a3b9d6d82c8fc5d074c0c9c80ecb78a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html 50ee4e3c2c1501e09e1d03b5fb4f37daffa62c4defd49fc6c6d2231e197eed7e 2 @@ -13333 +13333 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html 1b2c75c20b7f457c155082b54a1f27de74d5de1ca0f892833f0be8c49757363f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html eb52eafa2f465cd2f0d264717fb408d8baed085d8a7b40ee81ce563c13755aa7 2 @@ -13335 +13335 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html 
44146dfafbf98774f18f579700ad0c3e04b0aa361b7268e7fed1c320b98ccdde 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html 6ad6c9e28a2fa626ae986b2ce543de865dd72b9f99d73ceb1fcb29388edfc6fc 2 @@ -13341 +13341 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html 0a07c261ef3842ceb89868108d38e5be75bf0a6108040811541d4cfc4177bda5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html 2f162ed2b629b0405aeb0f3e8318b748f23df1828240bb6f0683ac227c5ab815 2 @@ -13346 +13346 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html 03ffea5a8b03432ba7d7bf23ea2ba306ea83a827a01e304265010d01e70e1327 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html 948430a37bfbb341b7b6da44b87f809edbf28f2e534feb8141f35817c24e4e17 2 @@ -13540 +13540 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html 07f4225e3ac5d48eb040e0f5477b1e5b880368bbfcad9553e5570116283d066e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html 9ab7adf17ea4920f4b9d05f8b56fccb47bd623d75b64acb7d368874e9f1fe6bd 2 @@ -13545 +13545 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html 408c5c14f6739127a1461f3ead0b1d2c307f58dad464181c793120483e90eedc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html 8beb3bdcc101107923f08107c3ba8e94b4c858ee1ab2999127f229dbe97f6790 2 @@ -14008 +14008 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/doxygen_crawl.html ac4f678fe37b564e793696f21c2adf0ae95051c3a25f2dc4918a50690b3b3d2e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/doxygen_crawl.html 9326636b77f9c30b7289f9e1fa240141ad68d0b024706e66058adc6a76ccc6c8 2 @@ -14108 +14108 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/fe__coupling__values_8h.html 3d2dd2f73992d23970dde917280441561d55172c2831ebed4cbd4d5dae466e21 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/fe__coupling__values_8h.html a2eb488528dfee4c829f4fabcb45f0f6170454167206d35a257793c04eec2766 2 @@ -14229 +14229 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/fe__remote__evaluation_8h_source.html 4925b6eb15b073b3e52d6c37533a8f415e5531db2ec427a3a02b5ad9c376294c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/fe__remote__evaluation_8h_source.html b5f9c8ffde7ce5aa203d839ab97aed5c7fff0bfd9f986ecd02d26cd3ad193681 2 @@ -14707 +14707 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html 38a85b9233e29696e9d8476f13a2ca2f08a161b58c37f5a5fd9f10befe594db5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html 880bdba463f25ea4ac925ca4503eed14efe2bb791905dbd027d3ab5d3d994eca 2 @@ -14710 +14710 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html 61328abfefdad83e455cb9a449a9c1ff991eab85f2b3a5b305cacb17bea2d853 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html b67dd16a6c1a8c267a78eb4bfeb2d4fc88e51691f726713d80dc43f9f932cd16 2 @@ -14723 +14723 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html d3735c889ec81c3ffbefb02d1a85b344005c1ff18688b8366627516514f6e849 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html 6c72ec8e57e0b212449f66cb3bdb640438f836a0489cf38120978117af4b7d34 2 @@ -14753 +14753 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html 92b558d1798fc8cd7e13b72c4393af121f8575965a0bab8d6efb14810c2e1e99 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html 0234a25d8fc70003f17f67e1e911bee06114e057ce2eb8b4f96dd91c7d5a5a4d 2 @@ -14758 +14758 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html 2a7480afb65ad35d8ae993da078c62ed4b5c8634ce777a4a6e818b2783bd2961 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html b5c143a984bc3c562394fff0982d2d1afe99eb321154120d96ea8755a0404787 2 @@ -14760 +14760 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html 6c9cceaf29b0b92a82926ac07516097d15312074c5049501a1aeff6afbd254e7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html 150425761351ddeaed4955533a4acea702085ce0511e421f6af97b2d216687e7 2 @@ -14770 +14770 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__feaccess.html 56c4c21654233f254e2370ae117ea89db4dabb19b47344dc854b1beccf544000 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__feaccess.html e4821c32d2e9e3571066cc37ec82a1dcc7fa8df446d26a79216a4d09bdfafb22 2 @@ -14784 +14784 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__hpcollection.html 3674aaa80c5b285909441f21c9792d6e0fa58ea26fc4b4be75caf1a5a3717209 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__hpcollection.html 5841b8a75a8594dd6f1e096960809d63316ef622f96b0c184b0190608a22e4ed 2 @@ -14790 +14790 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__mapping.html 147848c89b068497748a976d1f835af79305f35b9f49b4e336cbc4d29f09b77c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__mapping.html 3ea0f28fc58f3930ad2559649fa88330e283f516746f36819178f58f06a8cdbf 2 @@ -14801 +14801 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__reordering.html 4745beb360eca1529867ce56307f948bcb1fd0cf68163a85d54811e95ddea32e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__reordering.html 6194effcc5ef75fc0d08eab23484a2080ccccaea27ac123aeb04a66d9d9c02b9 2 @@ -14806 +14806 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__threads.html f517937c8a3c189daa1e29b6e97b8ac0b0a22f005f65454a37f5293197c3e185 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__threads.html 8e5b419650b717dfecf0e35fe5d70f9675d725b8104c6634390aabfc486805ae 2 @@ -14810 +14810 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/group__vector__valued.html aa1d4ae695b242fbf9c65e4d6da1173b1be1c00c7bd6da51f056f0a196961a79 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/group__vector__valued.html 056f222ce9b4bc3cece84bbdd550064ce58f536397537bb2fa500a74aba5f1a8 2 @@ -14903 +14903 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/index.html ec23e6e5010db0d173076d2bc48dda52c67a92c1adc47dc5983f36f95b581ce5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/index.html 6be5088142d910fb42f15aa23338ff82ee65f5240765c5321454791189487c55 2 @@ -14906 +14906 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/index__set_8h.html 14e01f492cf4795f38cc0a6ddfbd9a5cdde0a19308c4932930b392ebc3f756db 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/index__set_8h.html cb0db8fdfa8d4e9813cf6980e14c7e4f559cefb48a0db65d635446b42d115401 2 @@ -15247,2 +15247,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html b0357927d20ed9a5adb9188574837a1f628c772a75896a49cf4ff32f9c379f8b 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html 5041b4feb437674ed6748a4544cf262f2eac7e48495b40ef7e170bb02e2bee0f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html 
fad7faf2655d82937a6fced2c5ee4b22080ceae1346bfddd2fc6950751ebaccf 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html 79aa32a1963d4e0a6aac686a8eef66ca2f4bcfbdb624bb7d146e426aa8a2645d 2 @@ -15263,2 +15263,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataComponentInterpretation.html 74649759dbab1603386368d62936d49c6aa455a09cfeee63f47f6b9ba9c08916 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataOutBase.html fbb6842b2129f4e992a0835ab088bdc48b688c493ce5868172413376dc2d2720 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataComponentInterpretation.html 7dff5e8f13608a704aecf063f44a82db18507f896ce5287cca5ddc88dcd28177 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataOutBase.html d8d89687823123d2a6596e25f3fe56713f875a229ec08a3f94fb659c14b7b7db 2 @@ -15268 +15268 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDerivativeApproximation.html 40dab4b196438d4db449ad2f32b7043d362d4792ac6ad3d25a8083c75e5e5689 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDerivativeApproximation.html 6aa1485c7e8b4331a8d79e06a0f76e3c26a7bacb0a1a6b35f7f9eb1c6f612b7f 2 @@ -15274 +15274 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html 3fbbb8095207798234ec22c58ddcd009874c4baf4b3701d3369d0e10d09a675d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html 7954137db33b8a42a3293ccdbd4c968ec8404f6829d2bce8140c51ac8bd56d07 2 @@ -15279 +15279 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html d75f3496d38b4e820431b4853cbca8e926e14c051b63f8b89df401fc1d792aa4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html 873b63041093a1a0417a2fc6ca5d3a5c6d6cffb5ac136236003bbdc5dea5e157 2 @@ -15284 +15284 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html 5d14be656b9e8844fb3f967e851631049912658c409252a699cf118598e8160a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html 44430ddebded2c958f35a5a29116464325dd5d6c1fa640b3bd8cf63eff41f1a3 2 @@ -15292,3 +15292,3 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html 3fbec8bdf573aed283179d202d6788a582903ce1ad02dc9d6d6040813b12c591 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html ec6ef5e62820c1edce8f7c98f0e59eeb257201dafe92ee5a9f43bc5131d61002 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools_1_1Compositing.html 8f5b01d81e2991e28c58cf3de699a515b42fb73008c2661d9b8f52910aa616be 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html 06c5ac778f555931d4d02be41baa55a8ab739ac958327dc05fc9fbd8d02ac038 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html 504abdde6448d291f7f28deb5ce0ce91ab9deae4160518d83576b1a9ec05ab87 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools_1_1Compositing.html 263b0d0c7463af2c4f0d27a98737a53f14f0c832aa7ad31d08793e675c5f3499 2 @@ -15301,2 +15301,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFiniteElementDomination.html 2150eaf0cd2b45dd7ab57238aeb26050847eecab3b09154bed1cd0674150f242 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html 71d282a6f87c49670ef841e86d5461d8f5467e9787c4d45a17d9e8bc3fd3f673 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFiniteElementDomination.html 29876456c234f635ea2315f0e3f726627c5319522d0d6c087b14d27708f82cc2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html 
a464f82c93cb25af6c979ff691729d311da8bb465695216538f2169b3305533e 2 @@ -15309 +15309 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGraphColoring.html 05c747f6e21c27fddff85c6450968288f5479842decc408e97f62ab6b8dae268 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGraphColoring.html 06c4900decefd7ea26c3d1f746640c9d67b177727cd9a997486a8332a57c1578 2 @@ -15311 +15311 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html f2faf586dacaaa1c4d1b461accae155ff291c8fcd23f353a96f4841b0abc7565 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html d72d993fbd5d33ba83ca5dbbd28b197013bd22fa0207cda1ba3a76a3bef0c88c 2 @@ -15314,2 +15314,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html 7750105da8bb14464914335eff8b2efa9e4555713bf4e115b28acf8969dd33f4 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html 985fcd8a743d5f40a3ead92d916ec531d5057a867cb9e6be7636df18626aa5f4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html e88e5d9758bffdd9b461b453ce0017ef0b9d5b44de986c088ec0f3cce6db4470 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html b59854d85108c1ca645024e9a748e5e2d9e37f174c624662e6c7025499a8dc86 2 @@ -15342 +15342 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html c9ce53af6a8200937841cec69b78f75a0c1c6ddcd986150d2a5ed4afa101738c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html 2375549f15a53a0173d829ea68fb38a510958744fc2d1234d8e72c977551c586 2 @@ -15344,6 +15344,6 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html a48368565678d66e0e47f8937d2d4f9955d2bb735a4f9e5aa27b8fbeaad26d57 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html be052a9a9d991a08378b74ae19750032fe1f37b3d6ce2ba37e78229f5df82cb9 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html 5abbc5073df07c5ae486068bded769f5f6524d7b588cce2b4e91ead20e368e49 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html ba4e0de4ea90720a29b513247e7398f690ca5798f031783465588093f83e18de 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html 32c3973e0c82ab3b9a2649f3f7dab80b14af6d27207307b5433607640b7eaf62 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html c7f2e78aa58de03784bfa3ce8d77685a0e8cd7ec2f873b7c6ed85fbe2fbcfcc2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html e47c5f917e07577e2625cb8d8c853f497cb8b9b89d5da7f6c0a1da4a167a2454 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html d475fb23281ddc0f9547775341712f0009d9f6727bd3483a5ec38e4e883cae2f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html 6694e69b06c6c19a2bd603d45cc2772a230271b8e48e69ff4ba9591d90f04d1c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html 785ca5ba752049b309978d2a36dd77dc32617ff3b8f3775a1324a8ae5c0cf62e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html 77b0e2db5961ac7351af162bd355a727ab460678bafeda8a093cc07832953175 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html 6e1cec9ead50fef5fb85a5cc2a35c3fcd7690b900c4de7ed4d5d7306f9870693 2 @@ -15370 +15370 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html 0ac34515e9a9572cc78f55dc59f8164cb9b32a8aec4379c5ef8ff6f31ca7aaec 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html 85efd48ad64ca880c68e0f39ade6a36f87b14c53b18f9b665d0b5a9dbd59b756 2 @@ -15374 +15374 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 6f1c63e5ec4ebf75e6c959476e3611ad00a94d2d3df33aaf220f8f02fc1a1e74 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html 05187d3349af0e02abdb409030ecfd0abb5558ddffac6c702f3d1867fbdcce32 2 @@ -15383 +15383 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html 4b338a24e4290c66203d774d361a6805a85a1c7028ed752f5b8005319abb04aa 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html 495b0c2c7d37567fbea86319d2c6403ddb5bd5e7f59e1f19aa3db92dd085b1ef 2 @@ -15393,5 +15393,5 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html c4d377b7a90f3f54d3b10f26657e87a0da0140c676e1c8148bcfb0422ad0433d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html 310cccdc90ecdb0cfbde8728f38f68be4ba5bd432760521bff11c90ac2bc6b7c 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html 3cd2caf7c04f557e7730392457b6c06681cc19d2a8833e85ccdf758c5437e291 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html 1db3a6ad8c13a54c61fe1b6c542bb7f66344b8d4414033e5d71cde77f9b8b197 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html 6342d3a9989a6500086db2649a7058f8260b47b4c5047b0a30191e416575124e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html fb5ceea73208d33a5f10683f9275cb3a64b159f015783ac7ab3bed65a4321a83 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html 6ea42c540e67ef3caa7b8f977727a9897cd816a477c0172193263b8d29cc7950 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html 412cd9df9eb11d8faa7528f45bed62aea9e28f3db770ec93ca1765420b3d2374 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html 588f00efb0f4f07e286dd8cc77ee9ed10218d35d4f6c366fba568cc0d92808a2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html 0f62a0d0d5ca51f112e48ec648f506e21c9b575178e370823327405ca9071631 2 @@ -15399 +15399 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html 3c68110306ab823dccda81c64792113c56c36721d25556c291374cbf65dc864e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html 6504a2ba08a6883127ddf077770524a7b46bc5d9d72c17e36d1fc1126bf3400f 2 @@ -15409,2 +15409,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html 92013efce7564721c869474ce5f4bb08ba82911f011adc0f02624db421b972dd 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html c63148f0b66be2bb78db17baa2160bbc978d822e28c907663238339431e7a8e5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html 02178d328b4f2a2a0d5c339854c4693dd836b21b06dc14dc631238e4228dba21 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html 
0a14e2a36325a04197a2827f6ae99919b474c065b26cd3d0f71b1fb8f072c04f 2 @@ -15414,2 +15414,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html 24c3de174a70b324158f721cef78faf18bf4abc8e06791f47b4ff4bd6856c36a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html 25519a37ad895edf016b78d864e72087f92de9fa07e1d3608bc35aa5a455f961 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html 88506c09686158cda3b7449973557cdbb647f7a99e75896c4f0846008a06d1a2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html 793c983a1eea0b07b359c5a6c7d8eb9545803d73f7550a33f829bf52eb7d28b0 2 @@ -15417 +15417 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html ace4a184016f99868a0e9d001e5430866ab8863949bcd07d957e52f3d5b0264f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html 8d029820af28abb96d756f628235a5c796a009af4d53d340ed84c49121d419e0 2 @@ -15422 +15422 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html 2f9e4630a816bf24d6677c8ed4e0d424046acfba9ceba4d43dd1d71300abf671 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html bfeecae9fc1b78051fe790b92b44c9460b7e3e2ec78a2ac47eefcdce1e49ecba 2 @@ -15443 +15443 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities.html ed1cad9c76b9f561e4d2ca5ce19ad24395be91521da53fa4018a1cc9b52b338f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities.html b025d3ab2448de3c186f77fb8674fb6e256be544791f49d2e8664d93e2056457 2 @@ -15445,3 +15445,3 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html ca5df8f16fb73c6b8c67df77ede8728a9663106c792d642e22a0bff483deae6e 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI.html 6fd91208f9873c079cfbaf3c54a7559ebaf5b33e8ee3ed91b43ebab7b37a9835 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html e40b64584e4cb7896301a59bffcc3cc19691b06a33acf8cd84871401b18915bb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 444282fff338060060d8a02f232114d3cd85af152995647b037ef6c6403d100d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI.html 38800cf32800515d1ef095e7f5159a2d24203460cd04a8212c2ed61db381ba5c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html 125a9e31f00c92c781bf9dc40d0fc6a038e5bce6087824665ffda022279ab11b 2 @@ -15460 +15460 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html ad168dd215cb46532898ef96c34a9a1a473f0036df1b8b4dc2aa850d00b37a4a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html 6f743fea12e53e814637c1c5d0d7b42de2ee9bc9e74cdac21f894b619a075c68 2 @@ -15480,2 +15480,2 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html b2062c10d71534819aac8989563d0bdf5d6fe757ede2ab6e992d4433066aa953 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html ba29e0a98ed18116d498cdf25483ed48b890554efd8e2b742c7983a3375b018f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html 2a97b3e9d1e1b2f3b7658323bedb9a3d31e63d8be8dea81c028c7159c17a71db 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html ce7bc02ed184769d511958199410590e43d35abcfd2b009badba3339b9fbcc07 2 @@ -15618 +15618 @@ 
-/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html 46dcafdd3265ba0298c435136e7216a2e55b54c8c822bc7d42a4994d69d0f0f2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html b28df32b9ff665625e988fc82cb1266b33902c826b7e399321ed043e3f96a7dc 2 @@ -16713,8 +16713,8 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html ef857bb9bc983093d7e736b2e3581b18fd86dc2d71c53b391a0ad6d455f92a1a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html a9a3946f4a96925a8f338fc0643caa99046712b12b3a517dd84f0d80a3c30759 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html f835c716479df4876c294346de6353a37ae6384b069004b7c8d5d1ce8664e8b8 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html 310a2f8d7ac5e044459612522d5b2dacbda5c69ad2c3de2f295ab272d4b881a5 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_13.html b04d6dab62fd01f8eed9ada185f1586f655a65f5b4428d3f88e5882d8e5eaf8a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html 4230ba738c31caaf5d5c6ab28762343271dd6a39337a8e2bec8fc3aae7156ee9 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html 768cf67c1e085cf0f6063b62723970847e8147eb53c90edceadf13674aab9e78 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html 8718cd99699ffc2eb201c0fa502ba22b107e95925aade3450141dcac07ee196a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html 88a956233e9148ef405399e4c3157fe13c6d77d3ef0081a14ce7ebb8fe947764 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html 7e3a1a68753a0c1050d41a8ae670be34fa31f05412cf327d59cc71d972e35795 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html 24fc2f91efbd419daaac2d93d3d4715f27f105b89db4bc6db5b434222610af66 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html 03e5371719f0437f6d4193d6e8e9e12fc2e632c34c01a62de68a01520fb518c1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_13.html 63d403c7434b15efbec005aab43d877bca6d1daf09dc0caad7eb7234a1aac5f6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html 650e6e31d70e64f5e33e2f2193d39ed6d950a7bbe7626ac487764455d809edab 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html cba2fdd4380705749627d865a0f503c929f1ccdd744cdae79039e5a3b7f2084b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html 24a592cab0674848ada16bc5ed94abf889edb5da34bad9e56ecc0268958d0fe0 2 @@ -16722,40 +16722,40 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html a9fb753de55d8f2ba195a1d54ca7e4be82b6a56ecd3ea51156e6ce584a79d169 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html 0891bd324c4a3aedd0eefffec6ffc03853af68102af6179ea900d2dc73bb3649 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html 5e35f20ed84e975237889667b83468cfaa8cde558b77cedd9f33929f6c9ea4ae 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html fdc7bbf422b534fb46e5b20c877eac9b7c0c593759360a69ad66a4e130f1dbd9 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_21.html 7a9104563b84cb73efd7d6dc26ab72faa25869b3350e8252ba51631f8904f617 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html 03a6dac674139a2a61ea5c6b6198a6d6faa82d0aed2ef2051461ec1f61f530e5 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html c0d2e521d8d2a4a79e768fc051bcfdca0d1c31348c22b0d9d066d1bebd3d6625 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html 3ae2e85fcde716c6ea7d657402e4ab3fed3085e6aa7739542219f24ba850ebeb 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html f35df0b5d288f2e0e45c44ba9bf2f01dd72da92999cf574657348ff7937ce555 2 
-/usr/share/doc/packages/dealii/doxygen/deal.II/step_26.html 8ca399e3b0d278775a403d5929beda27187b43c3731062423fafa7bc1db6080d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_27.html 551fb11e3d9ed4bf4f20d8c593b203ed111f7266d776a9fe9c41d18817dfdd93 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_28.html acf223885a22fb0b7439a390d05b5c2ebfeba65a0223a71edbcc852f9bf0dd62 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_29.html 82ab7a8e003c479804e8a1b93b4a32199cc97f2a255e4fe138c1822c508e6512 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_3.html 9d30eeabb6005f010c5e3da595be255bb4cf24bc925847ef11e827e2a88df0c0 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html dfc59caf83fbd33829dafd18bb56b31aab5e4921a44b0c66b763fe36848131d1 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html eb336cf600a18b7db38214f41a32ddf937df0106b9065d3b7323d1030506508a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_32.html 2680620bab9d2d0ada87d64d3468ec477f69b96aac7b9182abbbb136f15803d9 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_33.html cfd38d424067c16856dfc384c02bdc95d57fde7516cfeeb99d2445f470415abc 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_34.html a76ae9caf1a4a5ddf7494fada6a4e713b14d2aad9b1bdb32252d406a79bb8844 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_35.html f821a6231c6ff7fca2b223ca8c8861ccf13797cf0fbc4d96016f6369fa600b24 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_36.html 58f4250e0c44fc8e08ce4110fb6b99b3e18f12e4d1e4a59c733d665a07f0e1a6 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_37.html 784b3c897f68dacd449973cd6ef77509b9f1d7b302250f43df313596874d5949 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_38.html f74e5102ca50fab9847d124591830cbb12094bd822aeef1227839450c6a96287 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_39.html 00cba833d37875679604add6c1c3756c2b218b033379301810cfedeb46269b1e 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_4.html 4577d73dcc2a931be46cf27be1e4cff84973712d932ade09cc6eb0a633070e50 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_40.html d10f77f898932899f8b94c7c25e67bd8277e63d8e7903ce2516b7026c4c62b58 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_41.html 33926c99a628b2a600298c92cdb810ce266c0e7b9d1b70fbe9757fedf391c204 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_42.html 3939b005bb45176cf5a432b4910e416c22fa9636b5459f3ac90c2f21be827cfe 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_43.html 79381a3c16cea68946d8bb5bfae12564a496bcab0d88354cf84cf026c8fbc43e 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_44.html 9ab897a6476292918e5dc1abc8bc22e1ed44cb6fd6acfe263cd8bfcc79223d9c 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_45.html 4fabfa49ac2271499e0ed2c51e9007f2000c9d5413ae9348bd7f869aa3c4eb92 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_46.html 1ee56a0b76f2c12909be7430ed9fe70f38e56846d895de6e22d56539822b3213 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_47.html 1528422d21de21efb72dea1b72e1254a992e15b9652f5f878a33e5de825211b8 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_48.html d39327283f9740ede36fcc3346d2127d02eb8d852d99475f4220f6cf600646ce 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_49.html 0668a76540b39d622f656610b6a6d163e70fc217f685a44414ef4da2b1178d4d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_5.html 1dda61fca29bc3295d4f1e3007b33eae2a9958894c07a80a125ba9553652e7a6 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_50.html 
ed19a8f3d6797b1a429245063b7f53852e7e5fee32adbe1f634a5b0b07ae78e5 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_51.html d427d43a1fbae68a0e85bff89db604e6bf1779ad928ead7135f79c966edf13b8 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_52.html e9161515d8bda63c5135126c482b3fbe8540f6801d37ae81ea439ffce5c54b0e 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html 2fd5eb2f6a3b6bd565b73c4b9525053a2c69fe8e32e0ba1b72e4c6ab1c942ae2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html fc1194b395f32a8b7bf33c9adc88ef9462e3f77909410e0d489ddf57bb321c99 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html 09ea34b42df35575b4e672381a0a0d13cb555adf9bb5e97a7a2435464fa49369 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html 0430163bc0c884362c581e5c1c9037beaa890a55ebf7e09df6436dde2430aa3d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html de162bc0c69e57691944bfe85b3ff69c6d3824a63ec5a82105a05516f399372c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_21.html 9f78bf8f4b5b29ada9c150be1a5ced266df249d54b8c5fd9a4dceb6eef4b7565 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html 4638e9f93b16f17a55a4fff01a5e821c14a5bf5d4c4b0f3fd3be4de555a4edcf 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html 11d0c4672b02bc0c78bac8db0f5b0ab8f70a9b1b962251a57412ce061aee618c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html 3b2f2f34bea486bc2cc366395a5a70e3a7694ab4b2c94a6bcd1fcca45d4ccf5c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html b12e72ce90f7702ba79d7e585faf473b6bd82b4b50bcda462c6d9f7d7cded630 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_26.html 7ee897e70771141ede75b735f2f95a115411fe18f5f61e89981fb19dbd132bed 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_27.html 0b39d01a8bf9cf450bfe709c51700e67d5d17c3740467b764e42e502a99f0d23 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_28.html a5df8eeab7db13ae28a80d8b29be79cd7ea7a8e4066a9f225b8206edf7dea0c8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_29.html 4243aa5de48da8ab48a7680532028b70a2fcc7abc6cc91bebdaf4b63acafb7ce 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_3.html 23d56563e2b608e2856a6fb9ae835d8d333821dabff8ebdb75870086d060ec22 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html 84f9354604aa1b6bd982f267d98d25c936108d8f7bd7cf4751ee63d257b6d448 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html 7a993acad8cf16cd53fdee5c647a407013156c754cdbfa935e64b8bdb9965e2c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_32.html 20adb5c44fa62c199f6cfaf9d8dbb67503e599a693d122e9a2f2751f03c1bb6c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_33.html b55d3a084b6c79e35158649ba307f3c80c673ea09cd341e23d0634c6f4915877 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_34.html d56de8ee76543af2cd624ca5c00feea50ee2f7f26c26655776bff13458924647 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_35.html fe72083b8f746fe88e7cee84e404843b08104ace007ac5d3fe031be5c33b451b 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_36.html 0474e1619971242b8e2ca45b684d38d8296bb57a78cefa7f1c2d783319464729 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_37.html e65bebc5fb6eed9da21fbacb3aeb69cf8e7616527f9db6b8c27da282bbb8a546 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_38.html 041aa31451186b9c36a5cf272a852f30d6a3e7e2ca9a510967ec25e702911ad6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_39.html b33312369d78946e5512ee6acee1dd9c18444c68488248a11b9d64ec62f8c45d 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/step_4.html e0bc97a8dbe56c1dfcd903eb06469b12a279adb12e11b91364a12bd51c30b54d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_40.html b6798fbf927cddc2ce77441cfc19ccadcb979588551725acc00ced40f62b49e7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_41.html adb5f799e53217b6148c9384286b27e37f2d2b9aa687169f337a85c587108a06 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_42.html 9b920c2191dabdcc2b5d74db4c5cea9b567a28ec01cb1d9784648f388269c962 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_43.html e645f1e4a0db950b781c82eec96fd820d74c944222adeb3f698b9b83945851f1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_44.html 9c2f1e61a479c19ea2c2c4884eb9017a15ad970af60e4adf982e2ec6b3a46b6a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_45.html baa785c2cada18508490d319abdb818b2700a3ac2e26a9c492821bf667ddecc7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_46.html 3299b4eb030c77311cd2d380749c38e2c64076f3829dce3e977f2b0074986c86 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_47.html c3db566e17e802641857ce596103b7b5e056876b80413660f1c41c1572d1a976 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_48.html 992e0541749f9a49594295be215d82153ae6aa94af7cc7407d296bd3988165b1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_49.html 9f8d28d6fd7e34d98c6024907c82ec3d76192cf780664bcf7314ad587bb47507 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_5.html b5a65a55a4570da775837b34f5de5c8a19315b8ac8742644f764d120543b42b7 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_50.html 425c82d0db32e5655a839dc8a2f590f6707ee6794182c7eed20160804bfa2229 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_51.html 18bbf4f0c66d67902575b3e51971b6947907d2625caf56ec9374dfc711e53123 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_52.html 63c6feee1e89f676847d17b8dff98a2a22afc43d2a8bea336504d4a4e2609bf2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html 07ddc5b3d3a1cf1e5d09fddf4d70d740ee14526be773641408733f87efe74d9a 2 @@ -16763,36 +16763,36 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/step_55.html cb5c33ef243511f4c0ad4269440cfbc1cce599d990e0a5e57d01386c478a23cb 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html c939855c91260ec0c4eab9c497bba1fd88cf8129aed066c518cb7f2b3b9ae86f 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html 13a01c3ac2a5ea9b219e818c10e8cdaa265270c1c25f323d5b17276a72d7d244 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html 0dfa87edd386ec14031dbcab35edec39cd35ad3bd09c9dd9843245e70a8a4881 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_59.html 556c413f8396ddcec8cd1135e8a3da96137299b67cf1a73f3f92d541810c50d0 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_6.html 98dadf85604960520ef9cd03a5c573957ac9228e442c6c5b6e1142399d5a97e7 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_60.html 43f6f501da3173c9e4f50c41d39bda6f273e69825cec8b3b1177a9dbf481f164 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_61.html 045229e284259da9ed450bc9b6b2445379e7b1c183143a3b9cd1b6b714042ca0 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_62.html e7cf3f83cc40c75fc7d6fdf0af9fe89a7e2e9b6c9403b3278eb06675c084cd48 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_63.html a6d5e980cc21766c42fb5f78f81f3542771d25a39e1c6083cc8efa5bf844f41e 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_64.html e54203b573bf8d6e7965770d4b4f9155e33c5ce528430f1856ff3e4e45386bdb 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_65.html 
7d023ef39ded3a3c5c72acec0be96d6e7d167793fd43f85f420033a814aabc09 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_66.html 83754cedfa891cc7705a4eea90837cadb78f157ae6ceece8d383d084a3bd7f8f 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_67.html efc5a6f346c2eabe89c574be266b931dd0af69ee36a650cec835e5b810f5a8ac 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_68.html a332202dc6004092021969e6ca72a8a69a2642b9fa77ad0cb9fb6ccf70687f88 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_69.html 1b222effbbc27cec98eb81fa1baed4e10e138ed73114f0d9ce51fc77e45b8c0d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_7.html 3c7e82d3f6f50f65ec784fe30023525a1d49874d461ed5e6175b07955fce48f8 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_70.html a1834cf449f0ec25c86ca65cf0c65c285f5ff75fc513e8594ea9821f58803241 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_71.html d6517637d3ab96f4d69b73b0856aae49a9bf190b801fd58e76550c0f170b0a18 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_72.html c243a8358e262ad7d54de7a5970f0279020fd85578daa05a1b853773aade42c7 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_74.html ccd25603ebd9200f2b20ff4fd70d8345b259fdbc588576b40ec18b83fe05e4ea 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_75.html 1316d06acdb2642025a4d7e10c0dffb4e81c82f6bd6ce2bd060e20026517f12f 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_76.html 50ea2152bfc1e13b2188fd7d37af8f1274fb284cd827df050f0b4aa0e5ca009c 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_77.html a2c6c5db5940f504802cee0f17bc298aa8a5151fd621caf2ef9d37f8556aa94d 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_78.html 4dac16bd2ab4c3e813c126a9eed5a96b01e5afde5c349013f95782115a242d49 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_79.html e65538b0c220d349b74ac5776af1d3af9702cfd8c4565ad91469fe176bb26b9a 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_8.html 1c8c84c4af6cc72f4336633d4afb1c6209c434425a423b5f678747335fa4f595 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_81.html cc019d846c4ead63832c91094bd5aecb195b3d489ae9e1695eb415b4c48cd3c2 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_82.html dabb7a1a1bc96e73dca9cdaa06aceb3b2e8770571b191c93bfad1231b0175837 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_83.html 97cbce424b570e55260caf3bf5678cb31107091ed23db9a2a15f4048640f43a1 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_85.html 75a9d9451bde75b454ecde013e5643c06da29d9fb93acdbb0d44c0d72a2efecb 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_86.html 4e44092550edd935ac7b1c46078c9c2cfa3463eb74f3205c3dfcce4298b1c1fa 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_87.html 6cc3837fef2643f747c01be46b8338850635ee116becd699a6c4096a1b357763 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_89.html b4bbc7cb3f58f8b2261f022f412e966e5683c09f8791edd98e74e4edc5e19818 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_9.html cf426d47242855c39bc956b0df87324cbe816eb3b3ef524fce6322f3d00db539 2 -/usr/share/doc/packages/dealii/doxygen/deal.II/step_90.html 96dc9799dd17099b14b86e0469f4e91d7fcb586b58300b859fed9f457bb2f94c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_55.html 2ede3e22867c4c8887b4f2fc4fb4bb59f3216a3b47e1c4d198c41a91c9b292c5 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html 0928cc06f9fbf1c6f230a15cc973a2653695a960971af8109a384118b5de3464 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html 9f10d3c3ec1e7a09c1f0a6d30ce79196643b75997e85921058c1014999a28636 2 
+/usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html b52edb5240d3f59330444d9c250413460d61ff3cfce3672b567c0659abe73af8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_59.html f5ad069ba3374bc02373ec332db310efbc6e52ef997d24562df32bd9de36a2f3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_6.html 46003e3581e84cdcc6c1aec26742c783d53e7eb7e6da818a789261cf0ebcfde2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_60.html 72f95f77fdb19d9beef58ea800f5d5478e974dd613919625104b96843b013b3f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_61.html 647bed4b4253f350481fd0a54352b275a97d2a87947dca7869d61b5835ff3929 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_62.html 631bbd4fb481988be33677573ba964b03c5116af0ee7f5ce3f9c683660d287ca 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_63.html caef6e05f948265f3a0f2b76b7d98b54d7f9b2dca7d63cf63f87d64d8402886d 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_64.html d056f68a99633d2e19cf96bbdeb8ab39b761a5ef1cfa7282638898d7cdfa7904 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_65.html b6923afc0f5770981dfe4b544a3ccf4a5c84131036dca97834bb6f75dac515eb 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_66.html 79921b0ca642fc5f3da0c6336d14d56fc1bf1aee4b2fc88d6bc63f40454a9470 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_67.html 18d27ad4a77dd52b120885bd14abfe8da4c42e431c26f6a9b3b48babeb541f9c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_68.html d2c28575da83ec10319f61b759da20d7c18cf325160608f91e544ae2d8b0e67c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_69.html d967564a77732fe1c4bd8765e4949b1b91a08b9a5a01bbc2d831ca55cff10db2 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_7.html 71e98897f64723eac385fc1c78230c111341a2c839afa15418076d7fccb98a65 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_70.html 68a9fc62c171249d171211abaa4e68dbfd982e5d84f8e97cc911668baafe0334 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_71.html f70f098c3e42a28427823e3420ef3bb3f6937503833468739f5b25b50d3c3268 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_72.html 10d73ff0de938c8db0de3a8f4cfdff7708026ba6f3fa9f3bab4e4ede9b4bd000 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_74.html aee6a3b1b37b7ba4043a8130ed4e33b2bdb33059c943f3cb43226d39fedf1e38 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_75.html 0ea0f9fcd9fbf5b3b30bc2cd2a3cc155348706b017cbc575a2090302b870298e 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_76.html d5263296446f8c3047d28f9f5e164f50e387c1bdd7fe3fa0fc932e6e6d9d3397 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_77.html daabec6e4443497a54365590026559fea0a8d5f5a990ef4601ea0f050ddbc9d6 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_78.html 358d5b61544ffb37dd8345a2f5c8952fc2ba91545068ebca7aa819cf718883d9 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_79.html 6d2cc5251008a28322c777929f724bcb075f921d2941b980d249ba261a407467 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_8.html 28492f28592e75aedf296fcc8ab0e8cd00112665a0553aec9332aa11990b85cc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_81.html 1b02b1f55961775d3024d6aecb59635d021fbab71b4786dd230b5e516876be40 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_82.html 2381f26f32f423c6773fbd69a7bc0304d49d40718fcbd575eb3f99d1e17ecca4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_83.html 629e092ca7eb0e675a8e98d8691e396ec39c28a3655073efdfe41ad62a1ea295 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_85.html 
943020a1aed2f5b35d8d03fb51d9bb36160d3ff8dede9f8a56dd97b9eb4be09f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_86.html ecc7ece120548c1b9970b62d537cc35c823cb5c7a6290e6c93f97ffdc2e7a20f 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_87.html d6e9fa5d8d114ad561f86733e2d68946b29bb74e18faf190142ddcd451df9979 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_89.html 26ae24c919f4279a654fc1d7026f0d3efb240c8f58697fe1f07a7bd715e737d3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_9.html 09380852d9ad45573e6e19ab14aa4759b63cd7c9f178e71f210768f4c3bd310c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/step_90.html cab0d9629723e1e8c18fd49624e7a3651a1e3098457d414454947200579686e6 2 @@ -16840 +16840 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structCellData.html 98b8d157539beeb9711ef40283e347be5606e8af83a3df562b86d0f0db807533 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structCellData.html 18f17fbd3a62fc7cc89a045e8992a12bd4da673505a91bb19f35615ad1b4eaa7 2 @@ -16845 +16845 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structColorEnriched_1_1Helper.html bad0f0c8ac3f2328d3fd144304dd6ee10e66060f2d00654a5505b031ba3adc14 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structColorEnriched_1_1Helper.html d95dde75e4f03a41ecdc14ff7d9fb290fb9077bc5831af7389a8b15d620fa0b7 2 @@ -16896 +16896 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1CommonInputs.html daf75ec4b53876fbf08e012ea5f557689e23b49f1ea439467fb1a15256a9a9c4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1CommonInputs.html a3065ead55123ef67d374810e0c782e7ad69ee82ab9fb22f9997c2d695bcf463 2 @@ -16902 +16902 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1Vector.html 43c4874e294ba3b41b601d594383c209224690c4ad98ef28acde0e53a09b24ba 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1Vector.html 76f2e092e6d29640ebac25d2c37391211f62f084ba58cc21c65c81c95fe31449 2 @@ -17082 +17082 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structGeometryInfo.html f98d6f6d649e18004ff87fa51769f19fde509873930ac75d443b72a555fc5321 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structGeometryInfo.html d0f4c6386d59ae28addf1552bb300c6037e9ff615605a57f9d0155c24690890c 2 @@ -17206 +17206 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1AdditionalQGeneratorData.html a6af8c3cf08d83039cf833521f3fe5036798c677a1e086d480450c0e3d93a9e4 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1AdditionalQGeneratorData.html 50b0a6734fd9a0c7e6c5736f67f6aec6109049c96060537a40b4f153f7c3b709 2 @@ -17210 +17210 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1RegionUpdateFlags.html db137c65968cd38c52c067ebf6f8419a0eaeb4c014c5644e5e13d414a547fd4a 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1RegionUpdateFlags.html d8c9b09efebbdc8736e02df8e981b8e6b5c2b34e184198cb2888ca5ef9345b9f 2 @@ -17214 +17214 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1HeightDirectionData.html 17c13c9c53c7d744cadef37c8d2faa5e4e07db7351ea32e53f3f09eff949b045 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1HeightDirectionData.html 85ae16d103d1286d83bd5b24622a2dd13d3db2e8607edc81503e2653717c987f 2 @@ -17366 +17366 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structProductType.html 
1bdca15b44f19bb271151f330e7b623e04b94a1e88afc22dc1efab12a65e01fc 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structProductType.html 7e79751fa6fa602c560d19073fcf276830b285f346e8941da226c5b7311fe58e 2 @@ -17397 +17397 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structSUNDIALS_1_1SundialsPreconditioner.html e49b026d5f03ee9bc4bcace8d0826e027b9f24102bda9dd425c8bb4a7bb096b3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structSUNDIALS_1_1SundialsPreconditioner.html def0aaf1e2e34dd9059889fa5a4233c48329c750d0fba8f3216d7792e704a4f6 2 @@ -17442 +17442 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/structSynchronousIterators.html eb8ef97391272be8c82fc6914870450db60ba097ac9efce17508faba47e6b9a8 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/structSynchronousIterators.html 84b54c90ebc5ef115e1035364f6f64c8a87da8950c6ab35f1f82dd5deec22274 2 @@ -18346 +18346 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/symmetric__tensor_8h.html 2b44a261b1199bed0748392b4dcdc8729496461a71853bc5d3d541b850f5bca1 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/symmetric__tensor_8h.html d2d4315402173a8a5160e43febf4eb7ced557de2c888408427494b7f0043587c 2 @@ -18350 +18350 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/synchronous__iterator_8h.html 98fd0cfedd407b59bde6aa432d57029f6f4218a8fbf269ca55e5eae2532c342c 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/synchronous__iterator_8h.html fc77cdf7435469d3b068c967e21a32eb3500ffd78397555b0aff66c9a63ff51b 2 @@ -18382 +18382 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/tensor_8h.html f3a8864a754a6da3931eea6b499869998b81f88348bb0d6e25033fbdcdef69e3 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/tensor_8h.html 0481356dd8f1a9d32e46a1d5a40fefba390ba1137284ab39d15e9b4978c46996 2 @@ -18446 +18446 @@ -/usr/share/doc/packages/dealii/doxygen/deal.II/todo.html 73aa7415fe370e692b68b756c9085b96794cdd23376bb7ebc901aa561a86a2ac 2 +/usr/share/doc/packages/dealii/doxygen/deal.II/todo.html 6593b4a6a60abfd5c6c6cbb35a94dc93afb931963ea3315e344b6b478c0380cb 2 comparing rpmtags comparing RELEASE comparing PROVIDES comparing scripts comparing filelist comparing file checksum creating rename script RPM file checksum differs. Extracting packages /usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html 2024-11-15 06:44:06.499466774 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/DEALGlossary.html 2024-11-15 06:44:06.503466810 +0000 @@ -116,7 +116,7 @@
Block (linear algebra)

It is often convenient to treat a matrix or vector as a collection of individual blocks. For example, in step-20 (and other tutorial programs), we want to consider the global linear system $Ax=b$ in the form

\begin{eqnarray*}
   \left(\begin{array}{cc}
     M & B^T \\ B & 0
   \end{array}\right)
@@ -127,9 +127,9 @@
   \left(\begin{array}{cc}
     F \\ G
   \end{array}\right),
   \end{eqnarray*}


where $U,P$ are the values of velocity and pressure degrees of freedom, respectively, $M$ is the mass matrix on the velocity space, $B$ corresponds to the negative divergence operator, and $B^T$ is its transpose and corresponds to the negative gradient.

Using such a decomposition into blocks, one can then define preconditioners that are based on the individual operators that are present in a system of equations (for example the Schur complement, in the case of step-20), rather than the entire matrix. In essence, blocks are used to reflect the structure of a PDE system in linear algebra, in particular allowing for modular solvers for problems with multiple solution components. On the other hand, the matrix and right hand side vector can also be treated as a unit, which is convenient, for example, during assembly of the linear system when one may not want to make a distinction between the individual components, or for an outer Krylov space solver that doesn't care about the block structure (e.g. if only the preconditioner needs the block structure).

Splitting matrices and vectors into blocks is supported by the BlockSparseMatrix, BlockVector, and related classes. See the overview of the various linear algebra classes in the Linear algebra classes topic. The objects present two interfaces: one that makes the object look like a matrix or vector with global indexing operations, and one that makes the object look like a collection of sub-blocks that can be individually addressed. Depending on context, one may wish to use one or the other interface.

Typically, one defines the sub-structure of a matrix or vector by grouping the degrees of freedom that make up groups of physical quantities (for example all velocities) into individual blocks of the linear system. This is defined in more detail below in the glossary entry on Block (finite element).
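
As a minimal illustrative sketch of these two interfaces (the block sizes are made up), consider a vector with a velocity and a pressure block:

#include <deal.II/lac/block_vector.h>

using namespace dealii;

void block_vector_demo()
{
  const types::global_dof_index n_u = 100, n_p = 30;

  BlockVector<double> solution;
  solution.reinit(std::vector<types::global_dof_index>{n_u, n_p});

  // Block interface: address the two sub-vectors individually.
  solution.block(0) = 1.0; // set all velocity entries
  solution.block(1) = 0.0; // set all pressure entries

  // Global interface: the same object viewed as one long vector;
  // global index n_u is the first entry of the pressure block.
  const double p0 = solution(n_u);
  (void)p0;
}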

@@ -148,7 +148,7 @@
FE_Q<dim>(1), 1);

With the exception of the number of blocks, the two objects are the same for all practical purposes, however.

Global degrees of freedom: While we have defined blocks above in terms of the vector components of a vector-valued solution function (or, equivalently, in terms of the vector-valued finite element space), every shape function of a finite element is part of one block or another. Consequently, we can partition all degrees of freedom defined on a DoFHandler into individual blocks. Since by default the DoFHandler class enumerates degrees of freedom in a more or less random way, you will first want to call the DoFRenumbering::component_wise function to make sure that all degrees of freedom that correspond to a single block are enumerated consecutively.
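
A minimal sketch of this renumbering step, assuming degrees of freedom have already been distributed on dof_handler:

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_renumbering.h>
#include <deal.II/dofs/dof_tools.h>

using namespace dealii;

template <int dim>
void enumerate_block_wise(DoFHandler<dim> &dof_handler)
{
  // Renumber so that all DoFs of block 0 come first, then block 1, ...
  DoFRenumbering::component_wise(dof_handler);

  // Query how many DoFs ended up in each block:
  const std::vector<types::global_dof_index> dofs_per_block =
    DoFTools::count_dofs_per_fe_block(dof_handler);
  (void)dofs_per_block;
}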


If you do this, you naturally partition matrices and vectors into blocks as well (see block (linear algebra)). In most cases, when you subdivide a matrix or vector into blocks, you do so by creating one block for each block defined by the finite element (i.e. in most practical cases the FESystem object). However, this need not be so: the DoFRenumbering::component_wise function allows you to group several vector components or finite element blocks into the same logical block (see, for example, the step-22 or step-31 tutorial programs, as opposed to step-20). As a consequence, using this feature and an extra argument, we can achieve the same result for the second way of creating a Stokes element outlined above, i.e. subdividing matrices into $2\times 2$ blocks and vectors into 2 blocks, as we would have obtained with the first way of creating the Stokes element, which has two blocks right away.

More information on this topic can be found in the documentation of FESystem, the Handling vector valued problems topic and the tutorial programs referenced therein.

Selecting blocks: Many functions allow you to restrict their operation to certain vector components or blocks. For example, this is the case for the functions that interpolate boundary values: one may want to only interpolate the boundary values for the velocity block of a finite element field but not the pressure block. The way to do this is by passing a BlockMask argument to such functions, see the block mask entry of this glossary.
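
As an illustrative sketch of this mechanism, the closely related ComponentMask can be handed to VectorTools::interpolate_boundary_values to constrain only the velocity part of a Stokes-type element (the element layout and boundary id here are assumptions):

#include <deal.II/base/function.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_values_extractors.h>
#include <deal.II/numerics/vector_tools.h>

#include <map>

using namespace dealii;

template <int dim>
void velocity_boundary_values(const DoFHandler<dim> &dof_handler)
{
  // Components 0..dim-1 are assumed to be the velocities; component
  // dim (the pressure) is excluded from the mask.
  const FEValuesExtractors::Vector velocities(0);

  std::map<types::global_dof_index, double> boundary_values;
  VectorTools::interpolate_boundary_values(
    dof_handler,
    0,                                     // boundary indicator
    Functions::ZeroFunction<dim>(dim + 1), // dim+1 components
    boundary_values,
    dof_handler.get_fe().component_mask(velocities));
}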

@@ -177,14 +177,14 @@
Boundary form

For a dim-dimensional triangulation in dim-dimensional space, the boundary form is a vector defined on faces. It is the vector product of the image of coordinate vectors on the surface of the unit cell. It is a vector normal to the surface, pointing outwards and having the length of the surface element.


A more general definition would be that (at least up to the length of this vector) it is exactly that vector that is necessary when considering integration by parts, i.e. equalities of the form $\int_\Omega \text{div} \vec \phi = -\int_{\partial\Omega} \vec n \cdot \vec \phi$. Using this definition then also explains what this vector should be in the case of domains (and corresponding triangulations) of dimension dim that are embedded in a space spacedim: in that case, the boundary form is still a vector defined on the faces of the triangulation; it is orthogonal to all tangent directions of the boundary and within the tangent plane of the domain. Note that this is compatible with case dim==spacedim since there the tangent plane is the entire space ${\mathbb R}^\text{dim}$.

In either case, the length of the vector equals the determinant of the transformation of reference face to the face of the current cell.

Boundary indicator

In a Triangulation object, every part of the boundary may be associated with a unique number (of type types::boundary_id) that is used to determine what kinds of boundary conditions are to be applied to a particular part of a boundary. The boundary is composed of the faces of the cells and, in 3d, the edges of these faces.


By default, all boundary indicators of a mesh are zero, unless you are reading from a mesh file that specifically sets them to something different, or unless you use one of the mesh generation functions in namespace GridGenerator that have a colorize option. A typical piece of code that sets the boundary indicator on part of the boundary to something else would look like this, here setting the boundary indicator to 42 for all faces located at $x=-1$:


for (auto &face : triangulation.active_face_iterators())
  if (face->at_boundary())
    if (face->center()[0] == -1)
      face->set_boundary_id (42);
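
Conversely, the indicator can later be queried, for example to assemble boundary terms only on the faces marked above; a hypothetical fragment, assuming cell is an active cell iterator:

for (const auto &face : cell->face_iterators())
  if (face->at_boundary() && face->boundary_id() == 42)
    {
      // assemble the boundary contribution of this face ...
    }
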
@@ -260,7 +260,7 @@

Component

When considering systems of equations in which the solution is not just a single scalar function, we say that we have a vector system with a vector-valued solution. For example, the vector solution in the elasticity equation considered in step-8 is $u=(u_x,u_y,u_z)^T$ consisting of the displacements in each of the three coordinate directions. The solution then has three elements. Similarly, the 3d Stokes equation considered in step-22 has four elements: $u=(v_x,v_y,v_z,p)^T$. We call the elements of the vector-valued solution components in deal.II. To be well-posed, for the solution to have $n$ components, there need to be $n$ partial differential equations to describe them. This concept is discussed in great detail in the Handling vector valued problems topic.


In finite element programs, one frequently wants to address individual elements (components) of this vector-valued solution, or sets of components. For example, we do this extensively in step-8, and a lot of documentation is also provided in the topic on Handling vector valued problems. If you are thinking only in terms of the partial differential equation (not in terms of its discretization), then the concept of components is the natural one.

On the other hand, when talking about finite elements and degrees of freedom, components are not always the correct concept because components are not always individually addressable. In particular, this is the case for non-primitive finite elements. Similarly, one may not always want to address individual components but rather sets of components — e.g. all velocity components together, and separate from the pressure in the Stokes system, without further splitting the velocities into their individual components. In either case, the correct concept to think in is that of a block. Since each component, if individually addressable, is also a block, thinking in terms of blocks is most frequently the better strategy.

For a given finite element, the number of components can be queried using the FiniteElementData::n_components() function, and you can find out which vector components are nonzero for a given finite element shape function using FiniteElement::get_nonzero_components(). The values and gradients of individual components of a shape function (if the element is primitive) can be queried using the FiniteElement::shape_value_component() and FiniteElement::shape_grad_component() functions on the reference cell. The FEValues::shape_value_component() and FEValues::shape_grad_component() functions do the same on a real cell. See also the documentation of the FiniteElement and FEValues classes.
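
A short sketch of these queries (the Taylor-Hood-type element chosen here is only an example):

#include <deal.II/fe/component_mask.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>

using namespace dealii;

template <int dim>
void query_components()
{
  // A vector-valued element: dim velocity components plus a pressure.
  const FESystem<dim> fe(FE_Q<dim>(2), dim, FE_Q<dim>(1), 1);

  const unsigned int n_components = fe.n_components(); // == dim + 1

  // Which vector components does shape function 0 contribute to?
  const ComponentMask nonzero = fe.get_nonzero_components(0);

  (void)n_components;
  (void)nonzero;
}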

@@ -282,7 +282,7 @@

would result in a mask [true, true, false] in 2d. Of course, in 3d, the result would be [true, true, true, false].

Note
Just as one can think of composed elements as being made up of components or blocks, there are component masks (represented by the ComponentMask class) and block masks (represented by the BlockMask class). The FiniteElement class has functions that convert between the two kinds of objects.
Not all component masks actually make sense. For example, if you have a FE_RaviartThomas object in 2d, then it doesn't make any sense to have a component mask of the form [true, false] because you try to select individual vector components of a finite element where each shape function has both $x$ and $y$ velocities. In essence, while you can of course create such a component mask, there is nothing you can do with it.
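
As a sketch of the conversion functions mentioned in the note above, assuming a Stokes-type FESystem in which the pressure is component dim and forms its own block:

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/component_mask.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

using namespace dealii;

template <int dim>
void build_masks(const FESystem<dim> &fe)
{
  // Component mask selecting only the pressure:
  const FEValuesExtractors::Scalar pressure(dim);
  const ComponentMask pressure_components = fe.component_mask(pressure);

  // The corresponding block mask, obtained via the conversion
  // functions on FiniteElement:
  const BlockMask pressure_blocks = fe.block_mask(pressure_components);

  (void)pressure_blocks;
}
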
Compressing distributed vectors and matrices

For parallel computations, deal.II uses the vector and matrix classes defined in the PETScWrappers and TrilinosWrappers namespaces. When running programs in parallel using MPI, these classes only store a certain number of rows or elements on the current processor, whereas the rest of the vector or matrix is stored on the other processors that belong to our MPI universe. This presents a certain problem when you assemble linear systems: we add elements to the matrix and right hand side vectors that may or may not be stored locally. Sometimes, we may also want to just set an element, not add to it.
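
In deal.II, this exchange is triggered by calling compress() once all processors are done adding entries; a minimal sketch of the common pattern (the variable name is made up):

#include <deal.II/lac/petsc_vector.h>

using namespace dealii;

void finish_assembly(PETScWrappers::MPI::Vector &system_rhs)
{
  // After all processors have added their local contributions,
  // ship the buffered off-processor entries to their owners:
  system_rhs.compress(VectorOperation::add);

  // If entries were set rather than added, one would instead use
  // VectorOperation::insert here.
}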

@@ -324,9 +324,9 @@

Degree of freedom

The term "degree of freedom" (often abbreviated as "DoF") is commonly used in the finite element community to indicate two slightly different, but related things. The first is that we'd like to represent the finite element solution as a linear combination of shape functions, in the form $u_h(\mathbf{x}) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf{x})$. Here, $U_j$ is a vector of expansion coefficients. Because we don't know their values yet (we will compute them as the solution of a linear or nonlinear system), they are called "unknowns" or "degrees of freedom". The second meaning of the term can be explained as follows: A mathematical description of a finite element problem is often to say that we are looking for a finite dimensional function $u_h \in V_h$ that satisfies some set of equations (e.g. $a(u_h,\varphi_h)=(f,\varphi_h)$ for all test functions $\varphi_h\in V_h$). In other words, all we say here is that the solution needs to lie in some space $V_h$. However, to actually solve this problem on a computer we need to choose a basis of this space; this is the set of shape functions $\varphi_j(\mathbf{x})$ we have used above in the expansion of $u_h(\mathbf x)$ with coefficients $U_j$. There are of course many bases of the space $V_h$, but we will specifically choose the one that is described by the finite element functions that are traditionally defined locally on the cells of the mesh. Describing "degrees of freedom" in this context requires us to simply enumerate the basis functions of the space $V_h$. For $Q_1$ elements this means simply enumerating the vertices of the mesh in some way, but for higher order elements one also has to enumerate the shape functions that are associated with edges, faces, or cell interiors of the mesh. The class that provides this enumeration of the basis functions of $V_h$ is called DoFHandler. The process of enumerating degrees of freedom is referred to as "distributing DoFs" in deal.II.
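
A minimal sketch of distributing degrees of freedom (the element choice is arbitrary):

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/tria.h>

using namespace dealii;

template <int dim>
void distribute(Triangulation<dim> &triangulation)
{
  const FE_Q<dim> fe(1);           // Q_1: one DoF per vertex
  DoFHandler<dim> dof_handler(triangulation);
  dof_handler.distribute_dofs(fe); // enumerate the basis of V_h

  // N in the expansion u_h = sum_j U_j phi_j:
  const types::global_dof_index n = dof_handler.n_dofs();
  (void)n;
}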

Direction flags
@@ -348,7 +348,7 @@
Distorted cells

A distorted cell is a cell for which the mapping from the reference cell to real cell has a Jacobian whose determinant is non-positive somewhere in the cell. Typically, we only check the sign of this determinant at the vertices of the cell. The function GeometryInfo::alternating_form_at_vertices computes these determinants at the vertices.


By way of example, if all of the determinants are of roughly equal value and on the order of $h^\text{dim}$ then the cell is well-shaped. For example, a square cell or face has determinants equal to $h^\text{dim}$ whereas a strongly sheared parallelogram has a determinant much smaller. Similarly, a cell with very unequal edge lengths will have widely varying determinants. Conversely, a pinched cell in which the location of two or more vertices is collapsed to a single point has a zero determinant at this location. Finally, an inverted or twisted cell in which the location of two vertices is out of order will have negative determinants.


The following two images show a well-formed, a pinched, and a twisted cell for both 2d and 3d:

@@ -387,19 +387,19 @@

Generalized support points

"Generalized support points" are, as the name suggests, a generalization of support points. The latter are used to describe that a finite element simply interpolates values at individual points (the "support points"). If we call these points $\hat{\mathbf{x}}_i$ (where the hat indicates that these points are defined on the reference cell $\hat{K}$), then one typically defines shape functions $\varphi_j(\mathbf{x})$ in such a way that the nodal functionals $\Psi_i[\cdot]$ simply evaluate the function at the support point, i.e., that $\Psi_i[\varphi]=\varphi(\hat{\mathbf{x}}_i)$, and the basis is chosen so that $\Psi_i[\varphi_j]=\delta_{ij}$ where $\delta_{ij}$ is the Kronecker delta function. This leads to the common Lagrange elements.

(In the vector valued case, the only other piece of information besides the support points $\hat{\mathbf{x}}_i$ that one needs to provide is the vector component $c(i)$ to which the $i$th node functional corresponds, so that $\Psi_i[\varphi]=\varphi(\hat{\mathbf{x}}_i)_{c(i)}$.)

On the other hand, there are other kinds of elements that are not defined this way. For example, for the lowest order Raviart-Thomas element (see the FE_RaviartThomas class), the node functional evaluates not individual components of a vector-valued finite element function with dim components, but the normal component of this vector: $\Psi_i[\varphi] = \varphi(\hat{\mathbf{x}}_i) \cdot \mathbf{n}_i$, where the $\mathbf{n}_i$ are the normal vectors to the face of the cell on which $\hat{\mathbf{x}}_i$ is located. In other words, the node functional is a linear combination of the components of $\varphi$ when evaluated at $\hat{\mathbf{x}}_i$. Similar things happen for the BDM, ABF, and Nedelec elements (see the FE_BDM, FE_ABF, FE_Nedelec classes).

In these cases, the element does not have support points because it is not purely interpolatory; however, some kind of interpolation is still involved when defining shape functions as the node functionals still require point evaluations at special points $\hat{\mathbf{x}}_i$. In these cases, we call the points generalized support points.

Finally, there are elements that still do not fit into this scheme. For example, some hierarchical basis functions (see, for example the FE_Q_Hierarchical element) are defined so that the node functionals are moments of finite element functions, $\Psi_i[\varphi] = \int_{\hat{K}} \varphi(\hat{\mathbf{x}}) {\hat{x}_1}^{p_1(i)} {\hat{x}_2}^{p_2(i)}$ in 2d, and similarly for 3d, where the $p_d(i)$ are the order of the moment described by shape function $i$. Some other elements use moments over edges or faces. In all of these cases, node functionals are not defined through interpolation at all, and these elements then have neither support points, nor generalized support points.

geometry paper
@@ -509,47 +509,47 @@
Lumped mass matrix

The mass matrix is a matrix of the form

\begin{align*}
        M_{ij} = \int_\Omega \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx,
\end{align*}

It frequently appears in the solution of time dependent problems where, if one uses an explicit time stepping method, it then leads to the need to solve problems of the form

\begin{align*}
        MU^n = MU^{n-1} + k_n BU^{n-1},
\end{align*}


in time step $n$, where $U^n$ is the solution to be computed, $U^{n-1}$ is the known solution from the previous time step, and $B$ is a matrix related to the differential operator in the PDE. $k_n$ is the size of the time step. A similar linear system of equations also arises out of the discretization of second-order differential equations.

The presence of the matrix $M$ on the left side is a nuisance because, even though we have used an explicit time stepping method, we still have to solve a linear system in each time step. It would be much preferable if the matrix were diagonal. "Lumping" the mass matrix is a strategy to replace $M$ by a matrix $M_\text{diagonal}$ that actually is diagonal, yet does not destroy the accuracy of the resulting solution.

Historically, mass lumping was performed by adding the elements of a row together and setting the diagonal entries of $M_\text{diagonal}$ to that sum. This works for $Q_1$ and $P_1$ elements, for example, and can be understood mechanically by replacing the continuous medium we are discretizing by one where the continuous mass distribution is replaced by one where (finite amounts of) mass are located only at the nodes. That is, we are "lumping together" the mass of an element at its vertices, thus giving rise to the name "lumped mass matrix". A more mathematical perspective is to compute the integral above for $M_{ij}$ via special quadrature rules; in particular, we replace the computation of

\begin{align*}
        M_{ij} = \int_\Omega \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx
               = \sum_K \int_K \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx,
\end{align*}

by quadrature

\begin{align*}
        (M_{\text{diagonal}})_{ij} = \sum_K \sum_q \varphi_i(\mathbf x_q^K) \varphi_j(\mathbf x_q^K)
        |K| w_q,
\end{align*}

where we choose the quadrature points as the nodes at which the shape functions are defined. If we order the quadrature points in the same way as the shape functions, then

\begin{align*}
        \varphi_i(\mathbf x_q^K) = \delta_{iq},
\end{align*}

and consequently

\begin{align*}
        (M_{\text{diagonal}})_{ij} = \delta_{ij} \sum_{K, \text{supp}\varphi_i \cap K \neq \emptyset} |K| w_i,
\end{align*}


where the sum extends over those cells on which $\varphi_i$ is nonzero. The so-computed mass matrix is therefore diagonal.

Whether or not this particular choice of quadrature formula is sufficient to retain the convergence rate of the discretization is a separate question. For the usual $Q_k$ finite elements (implemented by FE_Q and FE_DGQ), the appropriate quadrature formulas are of QGaussLobatto type. Mass lumping can also be done with FE_SimplexP_Bubbles, for example, if appropriate quadrature rules are chosen.

For an example of where lumped mass matrices play a role, see step-69.
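
The following is a minimal sketch of this construction, not the library's own implementation: it computes the diagonal of the lumped mass matrix for a scalar FE_Q discretization by integrating with the collocated Gauss-Lobatto rule described above (all names are illustrative):

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

template <int dim>
Vector<double> lumped_mass_diagonal(const DoFHandler<dim> &dof_handler,
                                    const unsigned int     degree)
{
  // Quadrature points coincide with the support points of FE_Q(degree):
  const QGaussLobatto<dim> quadrature(degree + 1);
  FEValues<dim>            fe_values(dof_handler.get_fe(), quadrature,
                                     update_values | update_JxW_values);

  Vector<double>     diagonal(dof_handler.n_dofs());
  const unsigned int dofs_per_cell = dof_handler.get_fe().n_dofs_per_cell();
  std::vector<types::global_dof_index> dof_indices(dofs_per_cell);

  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      fe_values.reinit(cell);
      cell->get_dof_indices(dof_indices);
      for (unsigned int q = 0; q < quadrature.size(); ++q)
        for (unsigned int i = 0; i < dofs_per_cell; ++i)
          // With collocated points, phi_i(x_q) = delta_{iq}, so only
          // diagonal entries accumulate anything nonzero:
          diagonal(dof_indices[i]) += fe_values.shape_value(i, q) *
                                      fe_values.shape_value(i, q) *
                                      fe_values.JxW(q);
    }
  return diagonal;
}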

Manifold indicator

Every object that makes up a Triangulation (cells, faces, edges, etc.) is associated with a unique number (of type types::manifold_id) that is used to identify which manifold object is responsible for generating new points when the mesh is refined.


By default, all manifold indicators of a mesh are set to numbers::flat_manifold_id. A typical piece of code that sets the manifold indicator on an object to something else would look like this, here setting the manifold indicator to 42 for all cells whose center has an $x$ component less than zero:


for (auto &cell : triangulation.active_cell_iterators())
  if (cell->center()[0] < 0)
    cell->set_manifold_id (42);
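
For the indicator to have an effect, a Manifold object must then be associated with it; a small sketch (the spherical manifold is merely an example choice):

#include <deal.II/grid/manifold_lib.h>
#include <deal.II/grid/tria.h>

using namespace dealii;

template <int dim>
void attach_manifold(Triangulation<dim> &triangulation)
{
  // New points on all objects with manifold indicator 42 will now be
  // placed on spheres around the origin:
  triangulation.set_manifold(42, SphericalManifold<dim>(Point<dim>()));
}
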
@@ -561,41 +561,41 @@
Mass matrix

The "mass matrix" is a matrix of the form

\begin{align*}
        M_{ij} = \int_\Omega \varphi_i(\mathbf x) \varphi_j(\mathbf x)\; dx,
\end{align*}

possibly with a coefficient inside the integral, and where $\varphi_i(\mathbf x)$ are the shape functions of a finite element. The origin of the term refers to the fact that in structural mechanics (where the finite element method originated), one often starts from the elastodynamics (wave) equation

\begin{align*}
        \rho \frac{\partial^2 u}{\partial t^2}
        -\nabla \cdot C \nabla u = f.
     \end{align*}
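As a concrete illustration, such a matrix can be assembled with MatrixTools::create_mass_matrix(); a minimal sketch (mesh and element choice arbitrary):

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/sparsity_pattern.h>
#include <deal.II/numerics/matrix_tools.h>

int main()
{
  using namespace dealii;

  Triangulation<2> triangulation;
  GridGenerator::hyper_cube(triangulation);
  triangulation.refine_global(3);

  const FE_Q<2> fe(1);
  DoFHandler<2> dof_handler(triangulation);
  dof_handler.distribute_dofs(fe);

  DynamicSparsityPattern dsp(dof_handler.n_dofs());
  DoFTools::make_sparsity_pattern(dof_handler, dsp);
  SparsityPattern sparsity;
  sparsity.copy_from(dsp);

  // M_ij = \int_Omega phi_i phi_j dx, integrated with a Gauss rule:
  SparseMatrix<double> mass_matrix(sparsity);
  MatrixTools::create_mass_matrix(dof_handler, QGauss<2>(fe.degree + 1), mass_matrix);
}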

/usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html 2024-11-15 06:44:06.531467060 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/Tutorial.html 2024-11-15 06:44:06.535467096 +0000 @@ -345,7 +345,7 @@

step-47

Solving the fourth-order biharmonic equation using the $C^0$ Interior Penalty (C0IP) method.
Keywords: FEInterfaceValues

/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas.tex differs (LaTeX 2e document, UTF-8 Unicode text, with very long lines): both versions contain the same formulas, but several blocks were reordered (among them $O(\text{dim}^3)$, $u = u - P^{-1} (A u - v)$, $u = u - P^{-T} (A u - v)$, $Q_2$, $p$, and the time-stepping and mapping formulas); the new version additionally contains ${\alpha_{i,d}}$.

/usr/share/doc/packages/dealii/doxygen/deal.II/_formulas_dark.tex differs (LaTeX 2e document, UTF-8 Unicode text, with very long lines): same reordering as in _formulas.tex.

/usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html 2024-11-15 06:44:06.795469418 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_1_and_6_2.html 2024-11-15 06:44:06.795469418 +0000 @@ -708,7 +708,7 @@
  • Improved: The FEValuesViews objects that one gets when writing things like fe_values[velocities] (see Handling vector valued problems) have become a lot smarter. They now compute a significant amount of data at creation time, rather than on the fly. This means that creating such objects becomes more expensive but using them is cheaper. To offset this cost, FEValuesBase objects now create all possible FEValuesViews objects at creation time, rather than whenever you do things like fe_values[velocities], and simply return a reference to a pre-generated object. This turns an $O(N)$ effort into an $O(1)$ effort, where $N$ is the number of cells.
    (WB 2008/12/10)

    /usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html 2024-11-15 06:44:06.819469632 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_6_2_and_6_3.html 2024-11-15 06:44:06.819469632 +0000 @@ -514,7 +514,7 @@
  • New: There are new functions FullMatrix::cholesky and FullMatrix::outer_product. FullMatrix::cholesky finds the Cholesky decomposition of a matrix in lower triangular form. FullMatrix::outer_product calculates *this $= VW^T$ where $V$ and $W$ are vectors.
    (Jean Marie Linhart 2009/07/27)
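    A small usage sketch of the two functions (matrix and vector entries arbitrary):

#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/vector.h>

int main()
{
  using namespace dealii;

  // Cholesky: compute lower triangular L with A = L L^T for an SPD matrix.
  FullMatrix<double> A(2, 2), L(2, 2);
  A(0, 0) = 4.; A(0, 1) = 2.;
  A(1, 0) = 2.; A(1, 1) = 3.;
  L.cholesky(A);

  // Outer product: M = V W^T.
  Vector<double> V(2), W(2);
  V[0] = 1.; V[1] = 2.;
  W[0] = 3.; W[1] = 4.;
  FullMatrix<double> M(2, 2);
  M.outer_product(V, W);
}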

    /usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html 2024-11-15 06:44:06.843469846 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_1_and_8_2.html 2024-11-15 06:44:06.843469846 +0000 @@ -852,7 +852,7 @@

  • New: There is now a new class Functions::InterpolatedTensorProductGridData that can be used to (bi-/tri-)linearly interpolate data given on a tensor product mesh of $x$ (and $y$ and $z$) values, for example to evaluate experimentally determined coefficients, or to assess the accuracy of a solution by comparing with a solution generated by a different code and written in gridded data. There is also a new class Functions::InterpolatedUniformGridData that can perform the same task more efficiently if the data is stored on meshes that are uniform in each coordinate direction.
    (Wolfgang Bangerth, 2013/12/20)
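    A minimal sketch of the tensor product variant (the 2x3 data table is made up for illustration):

#include <deal.II/base/function_lib.h>
#include <deal.II/base/point.h>
#include <deal.II/base/table.h>
#include <array>
#include <iostream>
#include <vector>

int main()
{
  using namespace dealii;

  std::array<std::vector<double>, 2> coordinates;
  coordinates[0] = {0.0, 1.0};      // x values
  coordinates[1] = {0.0, 0.5, 1.0}; // y values

  Table<2, double> data(2, 3);      // one value per (x_i, y_j) grid point
  data(0, 0) = 1.; data(0, 1) = 2.; data(0, 2) = 3.;
  data(1, 0) = 4.; data(1, 1) = 5.; data(1, 2) = 6.;

  const Functions::InterpolatedTensorProductGridData<2> f(coordinates, data);
  std::cout << f.value(Point<2>(0.5, 0.25)) << std::endl; // bilinear interpolation
}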

  • /usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_2_1_and_8_3.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_2_1_and_8_3.html 2024-11-15 06:44:06.871470097 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_8_2_1_and_8_3.html 2024-11-15 06:44:06.871470097 +0000 @@ -567,7 +567,7 @@

  • New: The VectorTools::integrate_difference() function can now also compute the $H_\text{div}$ seminorm, using the VectorTools::Hdiv_seminorm argument.
    (Zhen Tao, Arezou Ghesmati, Wolfgang Bangerth, 2015/04/17)
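    A hedged sketch of how the new norm could be used (function name and quadrature order are placeholders; the DoF handler, solution vector, and exact solution are assumed to exist in the surrounding program):

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/vector.h>
#include <deal.II/numerics/vector_tools.h>

template <int dim, typename VectorType>
double hdiv_seminorm_error(const dealii::DoFHandler<dim> &dof_handler,
                           const VectorType &solution,
                           const dealii::Function<dim> &exact_solution)
{
  dealii::Vector<double> difference_per_cell(
    dof_handler.get_triangulation().n_active_cells());
  dealii::VectorTools::integrate_difference(dof_handler, solution, exact_solution,
                                            difference_per_cell,
                                            dealii::QGauss<dim>(3),
                                            dealii::VectorTools::Hdiv_seminorm);
  // The global seminorm is the l2 norm of the cellwise values:
  return difference_per_cell.l2_norm();
}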

  • /usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html 2024-11-15 06:44:06.907470418 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/changes_between_9_1_1_and_9_2_0.html 2024-11-15 06:44:06.907470418 +0000 @@ -1575,7 +1575,7 @@

  • Improved: The additional roots of the HermiteLikeInterpolation with degree $p$ greater than four have been switched to the roots of the Jacobi polynomial $P^{(4,4)}_{p-3}$, making the interior bubble functions $L_2$ orthogonal and improving the conditioning of interpolation slightly.
    (Martin Kronbichler, 2019/07/12)

  • /usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html 2024-11-15 06:44:06.979471061 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classAffineConstraints.html 2024-11-15 06:44:06.979471061 +0000 @@ -981,27 +981,27 @@

    Add a constraint to this object. This function adds a constraint of the form

\[
  x_i = \sum_{j=1}^n w_j x_{k_j} + b
\]

    where $i$ is the number of the degree of freedom to be constrained and is provided by the constrained_dofs argument. The weights $w_j$ and indices $k_j$ are provided as pairs in the dependencies argument, and the inhomogeneity $b$ is provided by the last argument.

    As an example, if you want to add the constraint

\[
   x_{42} = 0.5 x_{12} + 0.5 x_{36} + 27
\]

    you would call this function as follows:

    constraints.add_constraint (42, {{12, 0.5}, {36, 0.5}}, 27.0);

    On the other hand, if (as one often wants to) you need a constraint of the kind

\[
   x_{42} = 27
\]

    you would call this function as follows:

    constraints.add_constraint (42, {}, 27.0);

    If you want to constrain a degree of freedom to zero, i.e., require that

\[
   x_{42} = 0
\]

    you would call this function as follows:

    constraints.add_constraint (42, {}, 0.0);

    That said, this special case can be achieved in a more obvious way by calling

    constraints.constrain_dof_to_zero (42);
    @@ -1026,9 +1026,9 @@

    Constrain the given degree of freedom to be zero, i.e., require a constraint like

\[
   x_{42} = 0.
\]

    Calling this function is equivalent to, but more readable than, saying

    constraints.add_constraint (42, {}, 0.0);

    It is not an error to call this function more than once on the same degree of freedom, but it is an error to call this function on a degree of freedom that has previously been constrained to either a different value than zero, or to a linear combination of degrees of freedom via the add_constraint() function.

    @@ -1161,13 +1161,13 @@
    Add an entry to a given line. In other words, this function adds a term $a_{ij} x_j$ to the constraints for the $i$th degree of freedom.

    If an entry with the same indices as the one this function call denotes already exists, then this function simply returns provided that the value of the entry is the same. Thus, it does no harm to enter a constraint twice.

    Parameters
    [in]  constrained_dof_index  The index $i$ of the degree of freedom that is being constrained.
    [in]  column  The index $j$ of the degree of freedom being entered into the constraint for degree of freedom $i$.
    [in]  weight  The factor $a_{ij}$ that multiplies $x_j$.
    @@ -1228,11 +1228,11 @@
    Set an inhomogeneity to the constraint for a degree of freedom. In other words, it adds a constant $b_i$ to the constraint for degree of freedom $i$. For this to work, you need to call add_line() first for the given degree of freedom.

    Parameters
    [in]  constrained_dof_index  The index $i$ of the degree of freedom that is being constrained.
    [in]  value  The right hand side value $b_i$ for the constraint on the degree of freedom $i$.
    @@ -1260,9 +1260,9 @@

    Close the filling of entries. Since the lines of a matrix of this type are usually filled in an arbitrary order and since we do not want to use associative containers to store the lines, we need to sort the lines and within the lines the columns before usage of the matrix. This is done through this function.

    Also, zero entries are discarded, since they are not needed.

    After closing, no more entries are accepted. If the object was already closed, then this function returns immediately.

    This function also resolves chains of constraints. For example, degree of freedom 13 may be constrained to $u_{13} = \frac{u_3}{2} + \frac{u_7}{2}$ while degree of freedom 7 is itself constrained as $u_{7} = \frac{u_2}{2} + \frac{u_4}{2}$. Then, the resolution will be that $u_{13} = \frac{u_3}{2} + \frac{u_2}{4} + \frac{u_4}{4}$. Note, however, that cycles in this graph of constraints are not allowed, i.e., for example $u_4$ may not itself be constrained, directly or indirectly, to $u_{13}$ again.
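    A self-contained sketch of such a chain being resolved (indices taken from the example above):

#include <deal.II/lac/affine_constraints.h>

int main()
{
  dealii::AffineConstraints<double> constraints;

  // u_13 = 0.5 u_3 + 0.5 u_7
  constraints.add_line(13);
  constraints.add_entry(13, 3, 0.5);
  constraints.add_entry(13, 7, 0.5);

  // u_7 = 0.5 u_2 + 0.5 u_4
  constraints.add_line(7);
  constraints.add_entry(7, 2, 0.5);
  constraints.add_entry(7, 4, 0.5);

  // close() sorts the entries, discards zeros, and resolves the chain:
  // u_13 = 0.5 u_3 + 0.25 u_2 + 0.25 u_4
  constraints.close();
}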

    @@ -1378,7 +1378,7 @@

    This function provides a "view" into a constraint object. Specifically, given a "mask" index set that describes which constraints we are interested in, it returns an AffineConstraints object that contains only those constraints that correspond to degrees of freedom that are listed in the mask, with indices shifted so that they correspond to the position within the mask. This process is the same as how IndexSet::get_view() computes the shifted indices. The function is typically used to extract from an AffineConstraints object corresponding to a DoFHandler only those constraints that correspond to a specific variable (say, to the velocity in a Stokes system) so that the resulting AffineConstraints object can be applied to a single block of a block vector of solutions; in this case, the mask would be the index set of velocity degrees of freedom, as a subset of all degrees of freedom.

    This function can only work if the degrees of freedom selected by the mask are constrained only against other degrees of freedom that are listed in the mask. In the example above, this means that constraints for the selected velocity degrees of freedom are only against other velocity degrees of freedom, but not against any pressure degrees of freedom. If that is not so, an assertion will be triggered.

    A typical case where this function is useful is as follows. Say, you have a block linear system in which you have blocks corresponding to variables $(u,p,T,c)$ (which you can think of as velocity, pressure, temperature, and chemical composition – or whatever other kind of problem you are currently considering in your own work). Let's assume we have developed a linear solver or preconditioner that first solves the coupled $u$- $T$ system, and once that is done, solves the $p$- $c$ system. In this case, it is often useful to set up block vectors with only two components corresponding to the $u$ and $T$ components, and later for only the $p$- $c$ components of the solution. As part of this, you will want to apply constraints (using the distribute() function of this class) to only the 2-block vector, but for this you need to obtain an AffineConstraints object that represents only those constraints that correspond to the variables in question, and in the order in which they appear in the 2-block vector rather than in global 4-block vectors. This function allows you to extract such an object corresponding to a subset of constraints by applying a mask to the global constraints object that corresponds to the variables we're currently interested in. For the $u$- $T$ system, we need a mask that contains all indices of $u$ degrees of freedom as well as all indices of $T$ degrees of freedom.
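    A sketch of this usage (the index set of $u$ and $T$ degrees of freedom is assumed to have been collected elsewhere):

#include <deal.II/base/index_set.h>
#include <deal.II/lac/affine_constraints.h>

// Extract the constraints acting on the u- and T-variables only, renumbered
// to positions within the mask as described above:
dealii::AffineConstraints<double>
extract_uT_constraints(const dealii::AffineConstraints<double> &constraints,
                       const dealii::IndexSet &u_and_T_dofs)
{
  return constraints.get_view(u_and_T_dofs);
}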

    @@ -1718,9 +1718,9 @@

    Print the constraints represented by the current object to the given stream.

    For each constraint of the form

\[
  x_{42} = 0.5 x_2 + 0.25 x_{14} + 2.75
\]

    this function will write a sequence of lines that look like this:

    42 2 : 0.5
    42 14 : 0.25
    @@ -2298,7 +2298,7 @@

    This function takes a matrix of local contributions (local_matrix) corresponding to the degrees of freedom indices given in local_dof_indices and distributes them to the global matrix. In other words, this function implements a scatter operation. In most cases, these local contributions will be the result of an integration over a cell or face of a cell. However, as long as local_matrix and local_dof_indices have the same number of elements, this function is happy with whatever it is given.

    In contrast to the similar function in the DoFAccessor class, this function also takes care of constraints, i.e. if one of the elements of local_dof_indices belongs to a constrained node, then rather than writing the corresponding element of local_matrix into global_matrix, the element is distributed to the entries in the global matrix to which this particular degree of freedom is constrained.

    With this scheme, we never write into rows or columns of constrained degrees of freedom. In order to make sure that the resulting matrix can still be inverted, we need to do something with the diagonal elements corresponding to constrained nodes. Thus, if a degree of freedom in local_dof_indices is constrained, we distribute the corresponding entries in the matrix, but also add the absolute value of the diagonal entry of the local matrix to the corresponding entry in the global matrix. Assuming the discretized operator is positive definite, this guarantees that the diagonal entry is always non-zero, positive, and of the same order of magnitude as the other entries of the matrix. On the other hand, when solving a source problem $Au=f$ the exact value of the diagonal element is not important, since the value of the respective degree of freedom will be overwritten by the distribute() call later on anyway.

    Note
    The procedure described above adds an unforeseeable number of artificial eigenvalues to the spectrum of the matrix. Therefore, it is recommended to use the equivalent function with two local index vectors in such a case.

    By using this function to distribute local contributions to the global object, one saves the call to the condense function after the vectors and matrices are fully assembled.

    Note
    This function in itself is thread-safe, i.e., it works properly also when several threads call it simultaneously. However, the function call is only thread-safe if the underlying global matrix allows for simultaneous access and the access is not to rows with the same global index at the same time. This needs to be made sure from the caller's site. There is no locking mechanism inside this method to prevent data races.
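    A sketch of the typical assembly loop built around this function (the integration of the cell matrix is elided since it depends on the problem being solved):

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/sparse_matrix.h>

template <int dim>
void assemble(const dealii::DoFHandler<dim> &dof_handler,
              const dealii::AffineConstraints<double> &constraints,
              dealii::SparseMatrix<double> &global_matrix)
{
  const unsigned int dofs_per_cell = dof_handler.get_fe().n_dofs_per_cell();
  dealii::FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
  std::vector<dealii::types::global_dof_index> local_dof_indices(dofs_per_cell);

  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      cell_matrix = 0;
      // ... integrate local contributions into cell_matrix here ...
      cell->get_dof_indices(local_dof_indices);
      // Scatter and resolve constraints in one pass; no condense() needed later:
      constraints.distribute_local_to_global(cell_matrix, local_dof_indices,
                                             global_matrix);
    }
}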
    @@ -2340,7 +2340,7 @@

    This function does almost the same as the function above but can treat general rectangular matrices. The main difference to achieve this is that the diagonal entries in constrained rows are left untouched instead of being filled with arbitrary values.

    Since the diagonal entries corresponding to eliminated degrees of freedom are not set, the result may have a zero eigenvalue, if applied to a square matrix. This has to be considered when solving the resulting problems. For solving a source problem $Au=f$, it is possible to set the diagonal entry after building the matrix by a piece of code of the form

    for (unsigned int i = 0; i < matrix.m(); ++i)
      if (constraints.is_constrained(i))
        matrix.diag_element(i) = 1.;
    @@ -2629,7 +2629,7 @@
    Given a vector, set all constrained degrees of freedom to values so that the constraints are satisfied. For example, if the current object stores the constraint $x_3=\frac 12 x_1 + \frac 12 x_2$, then this function will read the values of $x_1$ and $x_2$ from the given vector and set the element $x_3$ according to this constraint. Similarly, if the current object stores the constraint $x_{42}=208$, then this function will set the 42nd element of the given vector to 208.

    Note
    If this function is called with a parallel vector vec, then the vector must not contain ghost elements.
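    A self-contained sketch using the constraint $x_3=\frac 12 x_1 + \frac 12 x_2$ from above:

#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/vector.h>

int main()
{
  dealii::AffineConstraints<double> constraints;
  constraints.add_line(3); // x_3 = 0.5 x_1 + 0.5 x_2
  constraints.add_entry(3, 1, 0.5);
  constraints.add_entry(3, 2, 0.5);
  constraints.close();

  dealii::Vector<double> v(4);
  v[1] = 1.0;
  v[2] = 3.0;

  constraints.distribute(v); // sets v[3] = 0.5 * 1.0 + 0.5 * 3.0 = 2.0
}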
    /usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html 2024-11-15 06:44:07.015471383 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classAlgorithms_1_1ThetaTimestepping.html 2024-11-15 06:44:07.015471383 +0000 @@ -315,7 +315,7 @@
    }
    Now we need to study the application of the implicit and explicit operator. We assume that the pointer matrix points to the matrix created in the main program (the constructor did this for us). Here, we first get the time step size from the AnyData object that was provided as input. Then, if we are in the first step or if the timestep has changed, we fill the local matrix $m$, such that with the given matrix $M$, it becomes

    \[ m = I - \Delta t M. \]

    After we have worked off the notifications, we clear them, such that the matrix is only generated when necessary.

    @@ -1156,7 +1156,7 @@

    The operator computing the explicit part of the scheme. This will receive in its input data the value at the current time with name "Current time solution". It should obtain the current time and time step size from explicit_data().

    Its return value is $ Mu+cF(u) $, where $u$ is the current state vector, $M$ the mass matrix, $F$ the operator in space and $c$ is the adjusted time step size $(1-\theta) \Delta t$.

    Definition at line 415 of file theta_timestepping.h.
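    For a linear, time-independent spatial operator $F$, the implicit and explicit operators combine into the familiar theta scheme; a brief derivation consistent with the description above:

\begin{align*}
  M \frac{u_{n+1}-u_n}{\Delta t} &= \theta F u_{n+1} + (1-\theta) F u_n \\
  \Longrightarrow \quad (M - \theta\,\Delta t\,F)\, u_{n+1} &= M u_n + (1-\theta)\,\Delta t\, F u_n,
\end{align*}

    where the right hand side is exactly the value $ Mu+cF(u) $ with $c = (1-\theta)\Delta t$ computed by the explicit operator.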

    /usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html 2024-11-15 06:44:07.047471668 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classAnisotropicPolynomials.html 2024-11-15 06:44:07.047471668 +0000 @@ -177,10 +177,10 @@

    Detailed Description

    template<int dim>
    class AnisotropicPolynomials< dim >

    Anisotropic tensor product of given polynomials.

    Given one-dimensional polynomials $P^x_1(x), P^x_2(x), \ldots$ in $x$-direction, $P^y_1(y), P^y_2(y), \ldots$ in $y$-direction, and so on, this class generates polynomials of the form $Q_{ijk}(x,y,z) = P^x_i(x)P^y_j(y)P^z_k(z)$. (With obvious generalization if dim is in fact only 2. If dim is in fact only 1, then the result is simply the same set of one-dimensional polynomials passed to the constructor.)

    If the elements of each set of base polynomials are mutually orthogonal on the interval $[-1,1]$ or $[0,1]$, then the tensor product polynomials are orthogonal on $[-1,1]^d$ or $[0,1]^d$, respectively.

    The resulting dim-dimensional tensor product polynomials are ordered as follows: We iterate over the $x$ coordinates running fastest, then the $y$ coordinate, etc. For example, for dim==2, the first few polynomials are thus $P^x_1(x)P^y_1(y)$, $P^x_2(x)P^y_1(y)$, $P^x_3(x)P^y_1(y)$, ..., $P^x_1(x)P^y_2(y)$, $P^x_2(x)P^y_2(y)$, $P^x_3(x)P^y_2(y)$, etc.

    Definition at line 321 of file tensor_product_polynomials.h.
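    A small sketch of the constructor and the ordering (polynomial degrees chosen arbitrarily):

#include <deal.II/base/polynomial.h>
#include <deal.II/base/tensor_product_polynomials.h>
#include <iostream>
#include <vector>

int main()
{
  using namespace dealii;

  // Quadratic Lagrange polynomials in x, linear ones in y: 3 x 2 = 6
  // two-dimensional polynomials, ordered with the x index running fastest.
  std::vector<std::vector<Polynomials::Polynomial<double>>> base(2);
  base[0] = Polynomials::LagrangeEquidistant::generate_complete_basis(2);
  base[1] = Polynomials::LagrangeEquidistant::generate_complete_basis(1);

  const AnisotropicPolynomials<2> poly(base);
  std::cout << poly.n() << std::endl; // prints 6
}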

    Constructor & Destructor Documentation

    @@ -695,7 +695,7 @@
    Each tensor product polynomial $p_i$ is a product of one-dimensional polynomials in each space direction. Compute the indices of these one-dimensional polynomials for each space direction, given the index i.

    Definition at line 713 of file tensor_product_polynomials.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html 2024-11-15 06:44:07.079471954 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classArpackSolver.html 2024-11-15 06:44:07.079471954 +0000 @@ -243,14 +243,14 @@

    Detailed Description

    Interface for using ARPACK. ARPACK is a collection of Fortran77 subroutines designed to solve large scale eigenvalue problems. Here we interface to the routines dnaupd and dneupd of ARPACK. If the operator is specified to be symmetric we use the symmetric interface dsaupd and dseupd of ARPACK instead. The package is designed to compute a few eigenvalues and corresponding eigenvectors of a general n by n matrix A. It is most appropriate for large sparse matrices A.

    In this class we make use of the method applied to the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively.

    The ArpackSolver can be used in application codes with serial objects in the following way:

    solver.solve(A, B, OP, lambda, x, size_of_spectrum);
    for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable size_of_spectrum tells ARPACK the number of eigenvector/eigenvalue pairs to solve for. Here, lambda is a vector that will contain the eigenvalues computed, x a vector that will contain the eigenvectors computed, and OP is an inverse operation for the matrix A. Shift and invert transformation around zero is applied.

    Through the AdditionalData the user can specify some of the parameters to be set.

    For further information on how the ARPACK routines dsaupd, dseupd, dnaupd and dneupd work and also how to set the parameters appropriately please take a look into the ARPACK manual.

    Note
    Whenever you eliminate degrees of freedom using AffineConstraints, you generate spurious eigenvalues and eigenvectors. If you make sure that the diagonals of eliminated matrix rows are all equal to one, you get a single additional eigenvalue. But beware that some functions in deal.II set these diagonals to rather arbitrary (from the point of view of eigenvalue problems) values. See also step-36 for an example.
    @@ -523,7 +523,7 @@
    Solve the generalized eigenspectrum problem $A x=\lambda B x$ by calling the dsaupd and dseupd or dnaupd and dneupd functions of ARPACK.

    The function returns a vector of eigenvalues of length n and a vector of eigenvectors of length n in the symmetric case and of length n+1 in the non-symmetric case. In the symmetric case all eigenvectors are real. In the non-symmetric case complex eigenvalues always occur as complex conjugate pairs. Therefore the eigenvector for an eigenvalue with nonzero complex part is stored by putting the real and the imaginary parts in consecutive real-valued vectors. The eigenvector of the complex conjugate eigenvalue does not need to be stored, since it is just the complex conjugate of the stored eigenvector. Thus, if the last n-th eigenvalue has a nonzero imaginary part, Arpack needs in total n+1 real-valued vectors to store real and imaginary parts of the eigenvectors.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html 2024-11-15 06:44:07.127472383 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classArrayView.html 2024-11-15 06:44:07.127472383 +0000 @@ -1027,7 +1027,7 @@
    Return a reference to the $i$th element of the range represented by the current object.

    This function is marked as const because it does not change the view object. It may however return a reference to a non-const memory location depending on whether the template type of the class is const or not.

    This function is only allowed to be called if the underlying data is indeed stored in CPU memory.

/usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html 2024-11-15 06:44:07.151472597 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBarycentricPolynomial.html 2024-11-15 06:44:07.151472597 +0000 @@ -166,7 +166,7 @@

\[
   (x, y) = c_0 (x_0, y_0) + c_1 (x_1, y_1) + c_2 (x_2, y_2).
\]

    where each value $c_i$ is the relative weight of each vertex (so the centroid is, in 2d, where each $c_i = 1/3$). Since we only consider convex combinations we can rewrite this equation as

\[
   (x, y) = (1 - c_1 - c_2) (x_0, y_0) + c_1 (x_1, y_1) + c_2 (x_2, y_2).
\]
/usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html	2024-11-15 06:44:07.171472776 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBaseQR.html	2024-11-15 06:44:07.171472776 +0000
@@ -169,8 +169,8 @@

    Detailed Description

    template<typename VectorType>
    class BaseQR< VectorType >

    A base class for thin QR implementations.

    This class and classes derived from it are meant to build $Q$ and $R$ matrices one row/column at a time, i.e., by growing the $R$ matrix from an empty $0\times 0$ matrix to $N\times N$, where $N$ is the number of added column vectors.

    As a consequence, the matrix which has the same number of rows as each vector (i.e., the $Q$ matrix) is stored as a collection of vectors of VectorType.

    Definition at line 43 of file qr.h.
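    A short usage sketch with the QR< VectorType > implementation (values arbitrary):

#include <deal.II/lac/qr.h>
#include <deal.II/lac/vector.h>

int main()
{
  using namespace dealii;

  QR<Vector<double>> qr;

  // Grow the factorization one column at a time:
  Vector<double> column(3);
  column[0] = 1.; column[1] = 1.; column[2] = 0.;
  qr.append_column(column);
  column[0] = 0.; column[1] = 1.; column[2] = 1.;
  qr.append_column(column);

  // y = Q x, where x is sized like the (here 2x2) R matrix:
  Vector<double> x(qr.size()), y(3);
  x[0] = 1.; x[1] = 2.;
  qr.multiply_with_Q(y, x);
}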

    Member Typedef Documentation

    @@ -413,7 +413,7 @@
    Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

    Implemented in ImplicitQR< VectorType >, and QR< VectorType >.

    @@ -447,7 +447,7 @@
    Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implemented in ImplicitQR< VectorType >, and QR< VectorType >.

    @@ -481,7 +481,7 @@
    Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

    Implemented in ImplicitQR< VectorType >, and QR< VectorType >.

    @@ -515,7 +515,7 @@
    Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implemented in ImplicitQR< VectorType >, and QR< VectorType >.

/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html 2024-11-15 06:44:07.199473026 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockIndices.html 2024-11-15 06:44:07.199473026 +0000 @@ -223,7 +223,7 @@ void swap (BlockIndices &u, BlockIndices &v) noexcept

    Detailed Description

    BlockIndices represents a range of indices (such as the range $[0,N)$ of valid indices for elements of a vector) and how this one range is broken down into smaller but contiguous "blocks" (such as the velocity and pressure parts of a solution vector). In particular, it provides the ability to translate between global indices and the indices within a block. This class is used, for example, in the BlockVector, BlockSparsityPattern, and BlockMatrixBase classes.

    The information that can be obtained from this class falls into two groups. First, it is possible to query the global size of the index space (through the total_size() member function), and the number of blocks and their sizes (via size() and the block_size() functions).

    Secondly, this class manages the conversion of global indices to the local indices within this block, and the other way around. This is required, for example, when you address a global element in a block vector and want to know within which block this is, and which index within this block it corresponds to. It is also useful if a matrix is composed of several blocks, where you have to translate global row and column indices to local ones.
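    A brief sketch of the global-to-local translation (block sizes arbitrary):

#include <deal.II/lac/block_indices.h>
#include <iostream>
#include <vector>

int main()
{
  // The range [0,8) split into two blocks of sizes 3 and 5
  // (say, velocities and pressures):
  const dealii::BlockIndices indices(
    std::vector<dealii::types::global_dof_index>{3, 5});

  // Global index 4 lies in block 1, at position 1 within that block:
  const auto block_and_index = indices.global_to_local(4);
  std::cout << block_and_index.first << ' ' << block_and_index.second << std::endl;
}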

    See also
    Block (linear algebra)
    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html 2024-11-15 06:44:07.251473490 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockLinearOperator.html 2024-11-15 06:44:07.251473490 +0000 @@ -800,9 +800,9 @@

    and Id_c is the projection to the subspace consisting of all vector entries associated with constrained degrees of freedom.

    This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

\[
   (C^T A C + Id_c) x = C^T (b - A\,k)
\]

    with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

    A detailed explanation of this approach is given in the Constraints on degrees of freedom topic.

    @@ -843,9 +843,9 @@

    with

    This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

\[
   (C^T A C + Id_c) x = C^T (b - A\,k)
\]

    with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

    A detailed explanation of this approach is given in the Constraints on degrees of freedom topic.

    @@ -1543,7 +1543,7 @@

    Return a LinearOperator that performs the operations associated with the Schur complement. There are two additional helper functions, condense_schur_rhs() and postprocess_schur_solution(), that are likely necessary to be used in order to perform any useful tasks in linear algebra with this operator.

    We construct the definition of the Schur complement in the following way:

    Consider a general system of linear equations that can be decomposed into two major sets of equations:

\begin{eqnarray*}
 \mathbf{K}\mathbf{d} = \mathbf{f}
 \quad \Rightarrow\quad
 \left(\begin{array}{cc}
    A & B \\ C & D
 \end{array}\right)
 \left(\begin{array}{c}
    x \\ y
 \end{array}\right)
 =
 \left(\begin{array}{c}
    f \\ g
 \end{array}\right),
\end{eqnarray*}

@@ -1556,60 +1556,60 @@

    where $ A,B,C,D $ represent general subblocks of the matrix $ \mathbf{K} $ and, similarly, general subvectors of $ \mathbf{d},\mathbf{f} $ are given by $ x,y,f,g $ .

    This is equivalent to the following two statements:

\begin{eqnarray*}
   (1) \quad Ax + By &=& f \\
   (2) \quad Cx + Dy &=& g \quad .
\end{eqnarray*}

    Assuming that $ A,D $ are both square and invertible, we could then perform one of two possible substitutions,

\begin{eqnarray*}
   (3) \quad x &=& A^{-1}(f - By) \quad \text{from} \quad (1) \\
   (4) \quad y &=& D^{-1}(g - Cx) \quad \text{from} \quad (2) ,
\end{eqnarray*}

    which amount to performing block Gaussian elimination on this system of equations.

    For the purpose of the current implementation, we choose to substitute (3) into (2)

\begin{eqnarray*}
   C \: A^{-1}(f - By) + Dy &=& g \\
   -C \: A^{-1} \: By + Dy &=& g - C \: A^{-1} \: f \quad .
\end{eqnarray*}

    This leads to the result

\[
   (5) \quad (D - C\: A^{-1} \:B)y  = g - C \: A^{-1} f
       \quad \Rightarrow \quad Sy = g'
\]

    with $ S = (D - C\: A^{-1} \:B) $ being the Schur complement and the modified right-hand side vector $ g' = g - C \: A^{-1} f $ arising from the condensation step. Note that for this choice of $ S $, submatrix $ D $ need not be invertible and may thus be the null matrix. Ideally $ A $ should be well-conditioned.

    So for any arbitrary vector $ a $, the Schur complement performs the following operation:

\[
   (6) \quad Sa = (D - C \: A^{-1} \: B)a
\]

    A typical set of steps needed to solve a linear system (1),(2) would be:

    1. Define the inverse matrix A_inv (using inverse_operator()).
    2. Define the Schur complement $ S $ (using schur_complement()).
    3. Define the iterative inverse matrix $ S^{-1} $ such that (6) holds. It is necessary to use a solver with a preconditioner to compute the approximate inverse operation of $ S $ since we never compute $ S $ directly, but rather the result of its operation. To achieve this, one may again use the inverse_operator() in conjunction with the Schur complement that we've just constructed. Observe that both $ S $ and its preconditioner operate over the same space as $ D $.
    4. Perform the pre-processing step on the RHS of (5) using condense_schur_rhs():

   \[
      g' = g - C \: A^{-1} \: f
   \]

    5. Solve for $ y $ in (5):

   \[
      y = S^{-1} g'
   \]

    6. Perform the post-processing step from (3) using postprocess_schur_solution():

   \[
      x = A^{-1} (f - By)
   \]

    @@ -1655,10 +1655,10 @@
    In the above example, the preconditioner for $ S $ was defined as the preconditioner for $ D $, which is valid since they operate on the same space. However, if $ D $ and $ S $ are too dissimilar, then this may lead to a large number of solver iterations as $ \text{prec}(D) $ is not a good approximation for $ S^{-1} $.

    A better preconditioner in such a case would be one that provides a more representative approximation for $ S^{-1} $. One approach is shown in step-22, where $ D $ is the null matrix and the preconditioner for $ S^{-1} $ is derived from the mass matrix over this space.

    From another viewpoint, a similar result can be achieved by first constructing an object that represents an approximation for $ S $ wherein the expensive operation, namely $ A^{-1} $, is approximated. Thereafter we construct the approximate inverse operator $ \tilde{S}^{-1} $ which is then used as the preconditioner for computing $ S^{-1} $.

    // Construction of approximate inverse of Schur complement
    const auto A_inv_approx = linear_operator(preconditioner_A);
    const auto S_approx = schur_complement(A_inv_approx,B,C,D);
    @@ -1681,8 +1681,8 @@
    // Solve for y
    y = S_inv * rhs;
    x = postprocess_schur_solution (A_inv,B,y,f);
    Note that due to the construction of S_inv_approx and subsequently S_inv, there are a pair of nested iterative solvers which could collectively consume a lot of resources. Therefore care should be taken in the choices leading to the construction of the iterative inverse_operators. One might consider the use of an IterationNumberControl (or a similar mechanism) to limit the number of inner solver iterations. This controls the accuracy of the approximate inverse operation $ \tilde{S}^{-1} $ which acts only as the preconditioner for $ S^{-1} $. Furthermore, the preconditioner to $ \tilde{S}^{-1} $, which in this example is $ \text{prec}(D) $, should ideally be computationally inexpensive.

    However, if an iterative solver based on IterationNumberControl is used as a preconditioner then the preconditioning operation is not a linear operation. Here a flexible solver like SolverFGMRES (flexible GMRES) is best employed as an outer solver in order to deal with the variable behavior of the preconditioner. Otherwise the iterative solver can stagnate somewhere near the tolerance of the preconditioner or generally behave erratically. Alternatively, using a ReductionControl would ensure that the preconditioner always solves to the same tolerance, thereby rendering its behavior constant.

    Further examples of this functionality can be found in the test-suite, such as tests/lac/schur_complement_01.cc. The solution of a multi-component problem (namely step-22) using the schur_complement can be found in tests/lac/schur_complement_03.cc.

    See also
    Block (linear algebra)
    @@ -1724,7 +1724,7 @@

Return the number of blocks in a row (i.e., the number of "block columns", or the number $n$, if interpreted as a $m\times n$ block system).

    Definition at line 302 of file block_linear_operator.h.

/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html 2024-11-15 06:44:07.299473919 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockMatrixBase.html 2024-11-15 06:44:07.299473919 +0000 @@ -1309,7 +1309,7 @@

Adding Matrix-vector multiplication. Add $M*src$ to $dst$ with $M$ being this matrix.

@@ -1398,7 +1398,7 @@

Compute the matrix scalar product $\left(u,Mv\right)$.
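
As a consistency check, this scalar product can also be formed via an explicit multiplication (a hedged sketch; M, u, v, and tmp are assumed to be a compatible block matrix and block vectors):

const double s1 = M.matrix_scalar_product(u, v); // (u, Mv)
M.vmult(tmp, v);
const double s2 = u * tmp; // agrees with s1 up to roundoff
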

    @@ -1757,7 +1757,7 @@

Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    @@ -1881,7 +1881,7 @@

Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.
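
The forwarding scheme is transparent to callers; usage is the usual (a hedged sketch, assuming an assembled block matrix M and conforming block vectors x and y):

M.vmult(y, x);      // y  = M   * x
M.Tvmult(y, x);     // y  = M^T * x
M.vmult_add(y, x);  // y += M   * x
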

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html 2024-11-15 06:44:07.359474455 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrix.html 2024-11-15 06:44:07.359474455 +0000 @@ -954,7 +954,7 @@

Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Definition at line 398 of file block_sparse_matrix.h.

    @@ -1082,7 +1082,7 @@

Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 442 of file block_sparse_matrix.h.

    @@ -2079,7 +2079,7 @@

Adding Matrix-vector multiplication. Add $M*src$ to $dst$ with $M$ being this matrix.

    @@ -2184,7 +2184,7 @@

Compute the matrix scalar product $\left(u,Mv\right)$.

    @@ -2627,7 +2627,7 @@

Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    @@ -2735,7 +2735,7 @@

Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

/usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html 2024-11-15 06:44:07.391474740 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockSparseMatrixEZ.html 2024-11-15 06:44:07.391474740 +0000 @@ -767,7 +767,7 @@

Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Definition at line 370 of file block_sparse_matrix_ez.h.

@@ -792,7 +792,7 @@

Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 408 of file block_sparse_matrix_ez.h.

@@ -817,7 +817,7 @@

Adding Matrix-vector multiplication. Add $M*src$ to $dst$ with $M$ being this matrix.

    Definition at line 390 of file block_sparse_matrix_ez.h.

@@ -842,7 +842,7 @@

Adding Matrix-vector multiplication. Add $M^T*src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add() but takes the transposed matrix.

    Definition at line 428 of file block_sparse_matrix_ez.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html 2024-11-15 06:44:07.451475276 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVector.html 2024-11-15 06:44:07.451475276 +0000 @@ -1851,7 +1851,7 @@

Return the square of the $l_2$-norm.

    @@ -1903,7 +1903,7 @@

Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    @@ -1929,7 +1929,7 @@

Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    @@ -1955,7 +1955,7 @@

Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.
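
The four members are related in the obvious way (a hedged usage sketch; v is assumed to be an initialized BlockVector<double>):

const double s  = v.norm_sqr();      // sum of squares, equals l2_norm()^2
const double n1 = v.l1_norm();       // sum of absolute values
const double n2 = v.l2_norm();       // sqrt(norm_sqr())
const double ni = v.linfty_norm();   // largest absolute entry
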

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html 2024-11-15 06:44:07.495475670 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBlockVectorBase.html 2024-11-15 06:44:07.495475670 +0000 @@ -1277,7 +1277,7 @@

Return the square of the $l_2$-norm.

    @@ -1317,7 +1317,7 @@

Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    @@ -1337,7 +1337,7 @@

Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    @@ -1357,7 +1357,7 @@

Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html 2024-11-15 06:44:07.527475955 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classBoundingBox.html 2024-11-15 06:44:07.527475955 +0000 @@ -812,7 +812,7 @@

    Apply the affine transformation that transforms this BoundingBox to a unit BoundingBox object.

If $B$ is this bounding box, and $\hat{B}$ is the unit bounding box, compute the affine mapping that satisfies $G(B) = \hat{B}$ and apply it to point.

    Definition at line 311 of file bounding_box.cc.

    @@ -835,7 +835,7 @@

    Apply the affine transformation that transforms the unit BoundingBox object to this object.

If $B$ is this bounding box, and $\hat{B}$ is the unit bounding box, compute the affine mapping that satisfies $F(\hat{B}) = B$ and apply it to point.

    Definition at line 326 of file bounding_box.cc.
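
A round trip through the two mappings (a hedged sketch; the members are assumed to be real_to_unit() and unit_to_real(), matching the definitions cited above):

BoundingBox<2> box(std::make_pair(Point<2>(0., 0.), Point<2>(2., 4.)));
const Point<2> p(1., 1.);
const Point<2> p_unit = box.real_to_unit(p);      // G: yields (0.5, 0.25)
const Point<2> p_back = box.unit_to_real(p_unit); // F: recovers p
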

/usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html 2024-11-15 06:44:07.567476313 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCUDAWrappers_1_1SparseMatrix.html 2024-11-15 06:44:07.567476313 +0000 @@ -793,7 +793,7 @@

Matrix-vector multiplication: let $dst = M \cdot src$ with $M$ being this matrix.

    Definition at line 511 of file cuda_sparse_matrix.cc.

@@ -816,7 +816,7 @@

Matrix-vector multiplication: let $dst = M^T \cdot src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 529 of file cuda_sparse_matrix.cc.

@@ -839,7 +839,7 @@

Adding matrix-vector multiplication. Add $M \cdot src$ to $dst$ with $M$ being this matrix.

    Definition at line 547 of file cuda_sparse_matrix.cc.

@@ -862,7 +862,7 @@

Adding matrix-vector multiplication. Add $M^T \cdot src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add() but takes the transposed matrix.

    Definition at line 565 of file cuda_sparse_matrix.cc.

    @@ -884,7 +884,7 @@

Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful, e.g., in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    Definition at line 583 of file cuda_sparse_matrix.cc.

@@ -908,7 +908,7 @@

Compute the matrix scalar product $\left(u,Mv\right)$.

    Definition at line 596 of file cuda_sparse_matrix.cc.

@@ -936,8 +936,8 @@

Compute the residual of an equation $M \cdot x=b$, where the residual is defined to be $r=b-M \cdot x$. Write the residual into $dst$. The $l_2$ norm of the residual vector is returned.

Source $x$ and destination $dst$ must not be the same vector.

    Definition at line 610 of file cuda_sparse_matrix.cc.
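
In code (a hedged sketch; M, x, b, and r are assumed to be a CUDAWrappers::SparseMatrix<double> and compatible LinearAlgebra::CUDAWrappers::Vector<double> objects):

const double res_norm = M.residual(r, x, b); // writes r = b - M*x, returns |r|_2
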

    @@ -959,8 +959,8 @@

Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\ columns\ }j}\sum_{\mathrm{all\ rows\ }i} |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e., $|Mv|_1\leq |M|_1 |v|_1$.

    Definition at line 625 of file cuda_sparse_matrix.cc.

    @@ -982,8 +982,8 @@

Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j} |M_{ij}|$, (max. sum of rows). This is the natural norm that is compatible to the $l_\infty$-norm of vectors, i.e., $|Mv|_\infty \leq |M|_\infty |v|_\infty$.

    Definition at line 644 of file cuda_sparse_matrix.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classCellAccessor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCellAccessor.html 2024-11-15 06:44:07.667477206 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCellAccessor.html 2024-11-15 06:44:07.667477206 +0000 @@ -4183,7 +4183,7 @@

    This function computes a fast approximate transformation from the real to the unit cell by inversion of an affine approximation of the $d$-linear function from the reference $d$-dimensional cell.

The affine approximation of the unit to real cell mapping is found by a least squares fit of an affine function to the $2^d$ vertices of the present object. For any valid mesh cell whose geometry is not degenerate, this operation results in a unique affine mapping. Thus, this function will return a finite result for all given input points, even in cases where the actual transformation by an actual bi-/trilinear or higher order mapping might be singular. Besides only approximating the mapping from the vertex points, this function also ignores the attached manifold descriptions. The result is only exact in case the transformation from the unit to the real cell is indeed affine, such as in one dimension or for Cartesian and affine (parallelogram) meshes in 2d/3d.

    For exact transformations to the unit cell, use Mapping::transform_real_to_unit_cell().

    Note
    If dim<spacedim we first project p onto the plane.
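
A sketch contrasting the cheap approximation with the exact inverse mapping (hedged: cell, mapping, and p are assumed to exist; the member name real_to_unit_cell_affine_approximation() follows recent deal.II releases):

// Fast, always-defined affine approximation:
const Point<dim> p_unit_approx =
  cell->real_to_unit_cell_affine_approximation(p);
// Exact (possibly expensive, possibly non-invertible) transformation:
const Point<dim> p_unit_exact =
  mapping.transform_real_to_unit_cell(cell, p);
// The two agree only where the real-to-unit transformation is affine.
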
    @@ -4242,15 +4242,15 @@

Return the barycenter (also called centroid) of the object. The barycenter for an object $K$ of dimension $d$ in $D$ space dimensions is given by the $D$-dimensional vector $\mathbf x_K$ defined by

\[
  \mathbf x_K = \frac{1}{|K|} \int_K \mathbf x \; \textrm{d}x
\]

where the measure of the object is given by

\[
  |K| = \int_K \mathbf 1 \; \textrm{d}x.
\]

    This function assumes that $K$ is mapped by a $d$-linear function from the reference $d$-dimensional cell. Then the integrals above can be pulled back to the reference cell and evaluated exactly (if through lengthy and, compared to the center() function, expensive computations).

    /usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html 2024-11-15 06:44:07.707477563 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classChartManifold.html 2024-11-15 06:44:07.707477563 +0000 @@ -206,37 +206,37 @@

    Detailed Description

    template<int dim, int spacedim = dim, int chartdim = dim>
    class ChartManifold< dim, spacedim, chartdim >

    This class describes mappings that can be expressed in terms of charts. Specifically, this class with its template arguments describes a chart of dimension chartdim, which is part of a Manifold<dim,spacedim> and is used in an object of type Triangulation<dim,spacedim>: It specializes a Manifold of dimension chartdim embedded in a manifold of dimension spacedim, for which you have explicit pull_back() and push_forward() transformations. Its use is explained in great detail in step-53.

This is a helper class which is useful when you have an explicit map from a Euclidean space of dimension chartdim to a Euclidean space of dimension spacedim which represents your manifold, i.e., when your manifold $\mathcal{M}$ can be represented by a map

\[ F: \mathcal{B} \subset R^{\text{chartdim}} \mapsto \mathcal{M} \subset R^{\text{spacedim}} \]

(the push_forward() function) and that admits the inverse transformation

\[ F^{-1}: \mathcal{M} \subset R^{\text{spacedim}} \mapsto \mathcal{B} \subset R^{\text{chartdim}} \]

(the pull_back() function).

The get_new_point() function of the ChartManifold class is implemented by calling the pull_back() method for all surrounding_points, computing their weighted average in the chartdim Euclidean space, and calling the push_forward() method with the resulting point, i.e.,

\[ \mathbf x^{\text{new}} = F(\sum_i w_i F^{-1}(\mathbf x_i)). \]
    Derived classes are required to implement the push_forward() and the pull_back() methods. All other functions (with the exception of the push_forward_gradient() function, see below) that are required by mappings will then be provided by this class.

    Providing function gradients


In order to compute vectors that are tangent to the manifold (for example, tangent to a surface embedded in higher dimensional space, or simply the three unit vectors of ${\mathbb R}^3$), one needs to also have access to the gradient of the push-forward function $F$. The gradient is the matrix $(\nabla F)_{ij}=\partial_j F_i$, where we take the derivative with regard to the chartdim reference coordinates on the flat Euclidean space in which $\mathcal B$ is located. In other words, at a point $\mathbf x$, $\nabla F(\mathbf x)$ is a matrix of size spacedim times chartdim.

Only the ChartManifold::get_tangent_vector() function uses the gradient of the push-forward, but only a subset of all finite element codes actually require the computation of tangent vectors. Consequently, while derived classes need to implement the abstract virtual push_forward() and pull_back() functions of this class, they do not need to implement the virtual push_forward_gradient() function. Rather, that function has a default implementation (and consequently is not abstract, therefore not forcing derived classes to overload it), but the default implementation clearly cannot compute anything useful and therefore simply triggers an exception.

    A note on the template arguments

    The dimension arguments chartdim, dim and spacedim must satisfy the following relationships:

    dim <= spacedim
    chartdim <= spacedim

    However, there is no a priori relationship between dim and chartdim. For example, if you want to describe a mapping for an edge (a 1d object) in a 2d triangulation embedded in 3d space, you could do so by parameterizing it via a line

\[
  F: [0,1] \rightarrow {\mathbb R}^3
\]

    in which case chartdim is 1. On the other hand, there is no reason why one can't describe this as a mapping

\[
  F: {\mathbb R}^3 \rightarrow {\mathbb R}^3
\]


in such a way that the line $[0,1]\times \{0\}\times \{0\}$ happens to be mapped onto the edge in question. Here, chartdim is 3. This may seem cumbersome but satisfies the requirements of an invertible function $F$ just fine as long as it is possible to get from the edge to the pull-back space and then back again. Finally, given that we are dealing with a 2d triangulation in 3d, one will often have a mapping from, say, the 2d unit square or unit disk to the domain in 3d space, and the edge in question may simply be the mapped edge of the unit domain in 2d space. In this case, chartdim is 2.

    Definition at line 907 of file manifold.h.
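
The push_forward()/pull_back() pair is all a concrete chart has to supply. A minimal sketch (hedged: the class name and the radius-1 cylinder-sheet geometry are invented for illustration; the overrides follow the interface described above):

#include <deal.II/base/numbers.h>
#include <deal.II/grid/manifold.h>

#include <cmath>
#include <memory>

using namespace dealii;

template <int dim>
class CylinderSheet : public ChartManifold<dim, 3, 2>
{
public:
  CylinderSheet()
    : ChartManifold<dim, 3, 2>(Point<2>(2. * numbers::PI, 0.)) // phi is periodic
  {}

  // F^{-1}: point (x,y,z) on the surface -> chart coordinates (phi, z)
  virtual Point<2> pull_back(const Point<3> &x) const override
  {
    return Point<2>(std::atan2(x[1], x[0]), x[2]);
  }

  // F: chart coordinates (phi, z) -> (cos phi, sin phi, z)
  virtual Point<3> push_forward(const Point<2> &chart) const override
  {
    return Point<3>(std::cos(chart[0]), std::sin(chart[0]), chart[1]);
  }

  virtual std::unique_ptr<Manifold<dim, 3>> clone() const override
  {
    return std::make_unique<CylinderSheet<dim>>();
  }
};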

    Member Typedef Documentation

    @@ -561,7 +561,7 @@

Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -595,24 +595,24 @@

Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
  \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
\\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                           -F^{-1}(\mathbf x_1)\right]
\end{align*}

In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
  \mathbf s(t) &= F(\zeta(t))
 \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
 \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                    -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
  \mathbf s'(0) &=
    \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                       + t\left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right]\right)
                \right|_{t=0}
\\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                   \left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right].
\end{align*}

This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

Parameters
x1: The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.

/usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html 2024-11-15 06:44:07.759478028 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparseMatrix.html 2024-11-15 06:44:07.763478063 +0000 @@ -1049,7 +1049,7 @@

Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

    This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtain by ChunkSparsityPattern::symmetrize().
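
In code (a hedged sketch; M is assumed to be a filled ChunkSparseMatrix<double> whose sparsity pattern is symmetric):

M.symmetrize(); // M := (M + M^T) / 2, looping over the lower triangle
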

    @@ -1380,7 +1380,7 @@

Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

@@ -1405,7 +1405,7 @@

Compute the matrix scalar product $\left(u,Mv\right)$.

    @@ -1454,8 +1454,8 @@

Return the l1-norm of the matrix, that is $|M|_1=max_{all columns j}\sum_{all rows i} |M_ij|$, (max. sum of columns). This is the natural matrix norm that is compatible to the l1-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    @@ -1475,8 +1475,8 @@

Return the linfty-norm of the matrix, that is $|M|_infty=max_{all rows i}\sum_{all columns j} |M_ij|$, (max. sum of rows). This is the natural matrix norm that is compatible to the linfty-norm of vectors, i.e. $|Mv|_infty \leq |M|_infty |v|_infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    /usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html 2024-11-15 06:44:07.803478421 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classChunkSparsityPattern.html 2024-11-15 06:44:07.803478421 +0000 @@ -1136,7 +1136,7 @@

Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$.

    Definition at line 519 of file chunk_sparsity_pattern.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html 2024-11-15 06:44:07.835478706 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCompositionManifold.html 2024-11-15 06:44:07.835478706 +0000 @@ -579,24 +579,24 @@

Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
  \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
\\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                           -F^{-1}(\mathbf x_1)\right]
\end{align*}

In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
  \mathbf s(t) &= F(\zeta(t))
 \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
 \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                    -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
  \mathbf s'(0) &=
    \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                       + t\left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right]\right)
                \right|_{t=0}
\\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                   \left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right].
\end{align*}

This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

Parameters
x1: The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.

/usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html 2024-11-15 06:44:07.875479063 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classCylindricalManifold.html 2024-11-15 06:44:07.875479063 +0000 @@ -414,7 +414,7 @@

Compute the cylindrical coordinates $(r, \phi, \lambda)$ for the given space point where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    Implements ChartManifold< dim, dim, 3 >.

    @@ -446,7 +446,7 @@

Compute the Cartesian coordinates for a chart point given in cylindrical coordinates $(r, \phi, \lambda)$, where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    Definition at line 1111 of file manifold_lib.cc.

    @@ -476,7 +476,7 @@

Compute the derivatives of the mapping from cylindrical coordinates $(r, \phi, \lambda)$ to Cartesian coordinates where $r$ denotes the distance from the axis, $\phi$ the angle between the given point and the computed normal direction, and $\lambda$ the axial position.

    Definition at line 1131 of file manifold_lib.cc.
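
Typical use attaches this manifold to a cylinder-like grid (a hedged sketch; the default constructor aligns the manifold's axis with the x-axis):

Triangulation<3> tria;
GridGenerator::cylinder(tria);
tria.set_all_manifold_ids(0);
tria.set_manifold(0, CylindricalManifold<3>());
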

    @@ -726,7 +726,7 @@

Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -756,24 +756,24 @@

Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
  \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
\\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                           -F^{-1}(\mathbf x_1)\right]
\end{align*}

In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
  \mathbf s(t) &= F(\zeta(t))
 \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
 \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                    -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
  \mathbf s'(0) &=
    \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                       + t\left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right]\right)
                \right|_{t=0}
\\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                   \left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right].
\end{align*}

This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html 2024-11-15 06:44:07.903479314 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessor.html 2024-11-15 06:44:07.903479314 +0000 @@ -196,7 +196,7 @@

    As a consequence, DataOut is forced to take things apart into their real and imaginary parts, and both are output as separate quantities. This is the case for data that is written directly to a file by DataOut, but it is also the case for data that is first routed through DataPostprocessor objects (or objects of their derived classes): All these objects see is a collection of real values, even if the underlying solution vector was complex-valued.

    All of this has two implications:

    • If a solution vector is complex-valued, then this results in at least two input components at each evaluation point. As a consequence, the DataPostprocessor::evaluate_scalar_field() function is never called, even if the underlying finite element had only a single solution component. Instead, DataOut will always call DataPostprocessor::evaluate_vector_field().
• Implementations of the DataPostprocessor::evaluate_vector_field() in derived classes must understand how the solution values are arranged in the DataPostprocessorInputs::Vector objects they receive as input. The rule here is: If the finite element has $N$ vector components (including the case $N=1$, i.e., a scalar element), then the inputs for complex-valued solution vectors will have $2N$ components. These first contain the values (or gradients, or Hessians) of the real parts of all solution components, and then the values (or gradients, or Hessians) of the imaginary parts of all solution components.

    step-58 provides an example of how this class (or, rather, the derived DataPostprocessorScalar class) is used in a complex-valued situation.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html 2024-11-15 06:44:07.931479564 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorTensor.html 2024-11-15 06:44:07.931479564 +0000 @@ -269,7 +269,7 @@

These pictures show an ellipse representing the gradient tensor at, on average, every tenth mesh point. You may want to read through the documentation of the VisIt visualization program (see https://wci.llnl.gov/simulation/computer-codes/visit/) for an interpretation of how exactly tensors are visualized.

In elasticity, one is often interested not in the gradient of the displacement, but in the "strain", i.e., the symmetrized version of the gradient $\varepsilon=\frac 12 (\nabla u + \nabla u^T)$. This is easily facilitated with the following minor modification:

    template <int dim>
    class StrainPostprocessor : public DataPostprocessorTensor<dim>
    {
    public:
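  // Hedged completion: the documentation's example is truncated by the diff
  // here. A constructor and evaluate_vector_field() along the following
  // lines are assumed; they compute eps = (grad u + grad u^T)/2 pointwise.
  StrainPostprocessor()
    : DataPostprocessorTensor<dim>("strain", update_gradients)
  {}

  virtual void evaluate_vector_field(
    const DataPostprocessorInputs::Vector<dim> &input_data,
    std::vector<Vector<double>> &computed_quantities) const override
  {
    for (unsigned int p = 0; p < input_data.solution_gradients.size(); ++p)
      for (unsigned int d = 0; d < dim; ++d)
        for (unsigned int e = 0; e < dim; ++e)
          computed_quantities[p][Tensor<2, dim>::component_to_unrolled_index(
            TableIndices<2>(d, e))] =
            (input_data.solution_gradients[p][d][e] +
             input_data.solution_gradients[p][e][d]) / 2;
  }
};
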
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorVector.html 2024-11-15 06:44:07.959479814 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDataPostprocessorVector.html 2024-11-15 06:44:07.959479814 +0000 @@ -260,7 +260,7 @@

    In the second image, the background color corresponds to the magnitude of the gradient vector and the vector glyphs to the gradient itself. It may be surprising at first to see that from each vertex, multiple vectors originate, going in different directions. But that is because the solution is only continuous: in general, the gradient is discontinuous across edges, and so the multiple vectors originating from each vertex simply represent the differing gradients of the solution at each adjacent cell.

The output above – namely, the gradient $\nabla u$ of the solution – corresponds to the temperature gradient if one interpreted step-6 as solving a steady-state heat transfer problem. It is very small in the central part of the domain because in step-6 we are solving an equation that has a coefficient $a(\mathbf x)$ that is large in the central part and small on the outside. This can be thought of as a material that conducts heat well, and consequently the temperature gradient is small. On the other hand, the "heat flux" corresponds to the quantity $a(\mathbf x) \nabla u(\mathbf x)$. For the solution of that equation, the flux should be continuous across the interface. This is easily verified by the following modification of the postprocessor:

    template <int dim>
    class HeatFluxPostprocessor : public DataPostprocessorVector<dim>
    {
    public:
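  // Hedged completion: the example is cut off by the diff here. A
  // constructor and evaluate_scalar_field() along the following lines are
  // assumed; 'coefficient' stands in for the a(x) of step-6 and is
  // hypothetical.
  HeatFluxPostprocessor()
    : DataPostprocessorVector<dim>("heat_flux",
                                   update_gradients | update_quadrature_points)
  {}

  virtual void evaluate_scalar_field(
    const DataPostprocessorInputs::Scalar<dim> &input_data,
    std::vector<Vector<double>> &computed_quantities) const override
  {
    for (unsigned int p = 0; p < input_data.solution_gradients.size(); ++p)
      for (unsigned int d = 0; d < dim; ++d)
        // heat flux = a(x) * grad u
        computed_quantities[p](d) =
          coefficient(input_data.evaluation_points[p]) *
          input_data.solution_gradients[p][d];
  }
};
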
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html 2024-11-15 06:44:07.979479993 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1SecondDerivative.html 2024-11-15 06:44:07.979479993 +0000 @@ -248,7 +248,7 @@

Return the norm of the derivative object. Here, for the (symmetric) tensor of second derivatives, we choose the absolute value of the largest eigenvalue, which is the matrix norm associated to the $l_2$ norm of vectors. It is also the largest value of the curvature of the solution.

    Definition at line 490 of file derivative_approximation.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 2024-11-15 06:44:07.995480135 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeApproximation_1_1internal_1_1ThirdDerivative.html 2024-11-15 06:44:07.995480135 +0000 @@ -243,7 +243,7 @@
Return the norm of the derivative object. Here, for the (symmetric) tensor of second derivatives, we choose the absolute value of the largest eigenvalue, which is the matrix norm associated to the $l_2$ norm of vectors. It is also the largest value of the curvature of the solution.

    Definition at line 629 of file derivative_approximation.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html 2024-11-15 06:44:08.027480421 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDerivativeForm.html 2024-11-15 06:44:08.027480421 +0000 @@ -416,7 +416,7 @@
Converts a DerivativeForm <order, dim, dim, Number> to Tensor<order+1, dim, Number>. In particular, if order == 1 and the derivative is the Jacobian of $\mathbf F(\mathbf x)$, then Tensor[i] = $\nabla F_i(\mathbf x)$.

    @@ -476,8 +476,8 @@
Compute the Frobenius norm of this form, i.e., the expression $\sqrt{\sum_{ij} |DF_{ij}|^2} = \sqrt{\sum_{ij} |\frac{\partial F_i}{\partial x_j}|^2}$.

    @@ -497,7 +497,7 @@
Compute the volume element associated with the Jacobian of the transformation $\mathbf F$. That is to say, if $DF$ is square, it computes $\det(DF)$; if $DF$ is not square, it returns $\sqrt{\det(DF^T \,DF)}$.

    @@ -517,9 +517,9 @@
Assuming that the current object stores the Jacobian of a mapping $\mathbf F$, then the current function computes the covariant form of the derivative, namely $(\nabla \mathbf F) {\mathbf G}^{-1}$, where $\mathbf G = (\nabla \mathbf F)^{T}(\nabla \mathbf F)$. If $\nabla \mathbf F$ is a square matrix (i.e., $\mathbf F: {\mathbb R}^n \mapsto {\mathbb R}^n$), then this function simplifies to computing $\nabla {\mathbf F}^{-T}$.

    @@ -575,7 +575,7 @@
Auxiliary function that computes $A T^{T}$, where $A$ represents the current object.

    @@ -634,7 +634,7 @@
One of the uses of DerivativeForm is to apply it as a linear transformation. This function returns $\nabla \mathbf F(\mathbf x) \Delta \mathbf x$, which approximates the change in $\mathbf F(\mathbf x)$ when $\mathbf x$ is changed by the amount $\Delta \mathbf x$:

\[
  \nabla \mathbf F(\mathbf x) \; \Delta \mathbf x
  \approx
  \mathbf F(\mathbf x + \Delta \mathbf x) - \mathbf F(\mathbf x).
\]
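As a brief illustration of this linear-transformation use, here is a minimal, self-contained sketch (assuming dim = spacedim = 2; the entries of DF are arbitrary illustration data, not tied to any particular mapping):

#include <deal.II/base/derivative_form.h>
#include <deal.II/base/tensor.h>

using namespace dealii;

int main()
{
  // A first-order DerivativeForm holding the Jacobian of some map F;
  // row i stores \nabla F_i.
  DerivativeForm<1, 2, 2> DF;
  DF[0][0] = 2.0;
  DF[0][1] = 0.5;
  DF[1][0] = 0.0;
  DF[1][1] = 1.5;

  Tensor<1, 2> delta_x;
  delta_x[0] = 0.1;
  delta_x[1] = -0.2;

  // apply_transformation() returns \nabla F(x) \Delta x, the first-order
  // approximation of the change in F under the perturbation \Delta x.
  const Tensor<1, 2> delta_F = apply_transformation(DF, delta_x);

  // For square DF, determinant() gives the volume element det(DF).
  const double volume_element = DF.determinant();

  (void)delta_F;
  (void)volume_element;
  return 0;
}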
@@ -768,11 +768,11 @@

Similar to the previous apply_transformation(). In matrix notation, it computes $DF2 \, DF1^{T}$. Moreover, the result of this operation $\mathbf A$ can be interpreted as a metric tensor in ${\mathbb R}^\text{spacedim}$ which corresponds to the Euclidean metric tensor in ${\mathbb R}^\text{dim}$. For every pair of vectors $\mathbf u, \mathbf v \in {\mathbb R}^\text{spacedim}$, we have:

\[
  \mathbf u \cdot \mathbf A \mathbf v =
  \text{DF2}^{-1}(\mathbf u) \cdot \text{DF1}^{-1}(\mathbf v)
\]

    Definition at line 589 of file derivative_form.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html 2024-11-15 06:44:08.075480850 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1EnergyFunctional.html 2024-11-15 06:44:08.075480850 +0000 @@ -573,7 +573,7 @@
Evaluation of the residual for a chosen set of degree of freedom values. Underlying this is the computation of the gradient (first derivative) of the scalar function $\Psi$ with respect to all independent variables, i.e.,

\[
  \mathbf{r}(\mathbf{X}) =
  \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{X}}
\]
@@ -618,7 +618,7 @@

Compute the linearization of the residual vector around a chosen set of degree of freedom values. Underlying this is the computation of the Hessian (second derivative) of the scalar function $\Psi$ with respect to all independent variables, i.e.,

\[
  \frac{\partial\mathbf{r}(\mathbf{X})}{\partial\mathbf{X}}
  =
  \frac{\partial^{2}\Psi(\mathbf{X})}{\partial\mathbf{X} \otimes \partial\mathbf{X}}
\]
/usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ScalarFunction.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ScalarFunction.html	2024-11-15 06:44:08.127481314 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1ScalarFunction.html	2024-11-15 06:44:08.127481314 +0000
@@ -715,13 +715,13 @@

Extract the function Hessian for a subset of independent variables $\mathbf{A},\mathbf{B} \subset \mathbf{X}$, i.e.,

\[
  \frac{\partial}{\partial\mathbf{B}} \left[
  \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{A}} \right] =
  \frac{\partial^{2}\Psi(\mathbf{X})}{\partial\mathbf{B} \otimes
  \partial\mathbf{A}}
\]

    Parameters
    @@ -764,11 +764,11 @@
Extract the function Hessian for a subset of independent variables $\mathbf{A},\mathbf{B} \subset \mathbf{X}$, i.e.,

\[
  \frac{\partial}{\partial\mathbf{B}} \left[
  \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{A}} \right]
\]

    This function is a specialization of the above for rank-0 tensors (scalars). This corresponds to extracting a single entry of the Hessian matrix because both extractors imply selection of just a single row or column of the matrix.

    @@ -805,11 +805,11 @@
Extract the function Hessian for a subset of independent variables $\mathbf{A},\mathbf{B} \subset \mathbf{X}$, i.e.,

\[
  \frac{\partial}{\partial\mathbf{B}} \left[
  \frac{\partial\Psi(\mathbf{X})}{\partial\mathbf{A}} \right]
\]

    This function is a specialization of the above for rank-4 symmetric tensors.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1VectorFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1VectorFunction.html 2024-11-15 06:44:08.183481815 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDifferentiation_1_1AD_1_1VectorFunction.html 2024-11-15 06:44:08.183481815 +0000 @@ -534,7 +534,7 @@
Register the definition of the vector field $\boldsymbol{\Psi}(\mathbf{X})$.

    Parameters
    @@ -568,7 +568,7 @@
[in] funcs: A vector of recorded functions that defines the dependent variables.

Register the definition of the vector field $\hat{\mathbf{g}}(\mathbf{X}) \subset \boldsymbol{\Psi}(\mathbf{X})$ that may represent a subset of the dependent variables.

    Parameters
    @@ -598,7 +598,7 @@
[in] funcs: The recorded functions that define a set of dependent variables.
Compute the value of the vector field $\boldsymbol{\Psi}(\mathbf{X})$.

    Parameters
    @@ -627,10 +627,10 @@
[out] values: A Vector object with the value for each component of the vector field evaluated at the point defined by the independent variable values. The output values vector has a length corresponding to n_dependent_variables.

    Compute the Jacobian (first derivative) of the vector field with respect to all independent variables, i.e.

\[
  \mathbf{J}(\boldsymbol{\Psi})
     = \frac{\partial\boldsymbol{\Psi}(\mathbf{X})}{\partial\mathbf{X}}
\]

    Parameters
    @@ -673,7 +673,7 @@
Extract the set of functions' values for a subset of dependent variables $\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$.

    Parameters
    @@ -719,13 +719,13 @@
[in] values: A Vector object with the value for each component of the vector field evaluated at the point defined by the independent variable values.
Extract the Jacobian of the subset of dependent functions $\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$ for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.,

\[
  \mathbf{J}(\mathbf{g})
     = \frac{\partial\mathbf{g}(\mathbf{X})}{\partial\mathbf{A}}
\]

The first index of the Jacobian matrix $\mathbf{J}(\mathbf{g})$ relates to the dependent variables, while the second index relates to the independent variables.

    Parameters
    @@ -767,11 +767,11 @@
[in] jacobian: The Jacobian of the vector function with respect to all independent variables, i.e., that returned by compute_jacobian().
Extract the Jacobian of the subset of dependent functions $\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$ for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.,

\[
  \mathbf{J}(\mathbf{g})
     = \frac{\partial\mathbf{g}(\mathbf{X})}{\partial\mathbf{A}}
\]

    This function is a specialization of the above for rank-0 tensors (scalars). This corresponds to extracting a single entry of the Jacobian matrix because both extractors imply selection of just a single row or column of the matrix.

    @@ -808,11 +808,11 @@
Extract the Jacobian of the subset of dependent functions $\mathbf{g} \subset \boldsymbol{\Psi}(\mathbf{X})$ for a subset of independent variables $\mathbf{A} \subset \mathbf{X}$, i.e.,

\[
  \mathbf{J}(\mathbf{g})
     = \frac{\partial\mathbf{g}(\mathbf{X})}{\partial\mathbf{A}}
\]

    This function is a specialization of the above for rank-4 symmetric tensors.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html 2024-11-15 06:44:08.211482064 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDiscreteTime.html 2024-11-15 06:44:08.211482064 +0000 @@ -165,11 +165,11 @@  

    Detailed Description

This class provides a means to keep track of the simulation time in a time-dependent simulation. It manages stepping forward from a start time $T_{\text{start}}$ to an end time $T_{\text{end}}$. It also allows adjusting the time step size during the simulation. This class provides the necessary interface to be incorporated in any time-dependent simulation. The usage of this class is demonstrated in step-19 (and step-83) as well as step-21.

    This class provides a number of invariants that are guaranteed to be true at all times.

  • The current simulation time is within the closed interval between the start time and the end time ( $T_{\text{start}} \le t \le T_{\text{end}}$).
  • Whenever time is incremented, the step size is positive ( $dt > 0$). In other words, time advances in strictly ascending order ( $m < n \Leftrightarrow t_m < t_n$).

The model this class follows is that one sets a desired time step length either through the constructor or by using the set_desired_next_step_size() function. This step size will then be used in all following calls to the advance_time() function, but may be adjusted slightly towards the end of the simulation to ensure that the simulation time hits the end time exactly. The adjustment is useful for the following reasons:

    Let's say that you loop over all of the time steps by using a for loop

for (DiscreteTime time(0., 1., 0.3);
     time.is_at_end() == false;
     time.advance_time())
  {
    // Insert simulation code here
  }
@@ -187,34 +187,34 @@
In the above example the time starts at $T_{\text{start}} = 0$ until $T_{\text{end}}=1$. Assuming the time step $dt = 0.3$ is not modified inside the loop, the time is advanced from $t = 0$ to $t = 0.3$, $t = 0.6$, $t = 0.9$, and finally it reaches the end time at $t = 1.0$. Here, the final step size needs to be reduced from its desired value of 0.3 to $dt = 0.1$ in order to ensure that we finish the simulation exactly at the specified end time. In fact, you should assume that not only the last time step length may be adjusted, but also previous ones – for example, this class may take the liberty to spread the decrease in time step size out over several time steps and increment time from $t=0$, to $0.3$, $0.6$, $0.8$, and finally $t=T_{\text{end}}=1$ to avoid too large a change in time step size from one step to another.

    The other situation in which the time step needs to be adjusted (this time to slightly larger values) is if a time increment falls just short of the final time. Imagine, for example, a similar situation as above, but with different end time:

    for (DiscreteTime time(0., 1.21, 0.3);
    time.is_at_end() == false;
    time.advance_time())
    {
    // Insert simulation code here
    }
Here, the time step from $t=0.9$ to $t=1.2$ falls just short of the final time $T_{\text{end}}=1.21$. Instead of following up with a very small step of length $dt=0.01$, the class stretches the last time step (or last time steps) slightly to reach the desired end time.

    The examples above make clear that the time step size given to this class is only a desired step size. You can query the actual time step size using the get_next_step_size() function.
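As a small, hedged illustration of that desired-versus-actual distinction, the loop below prints the step size that will actually be taken at every iteration (the exact values depend on the adjustment strategy discussed above):

#include <deal.II/base/discrete_time.h>

#include <iostream>

using namespace dealii;

int main()
{
  // Desired step 0.3 on [0, 1]: near the end, the actual step is
  // adjusted so that the simulation finishes exactly at t = 1.
  for (DiscreteTime time(0., 1., 0.3);
       time.is_at_end() == false;
       time.advance_time())
    std::cout << "t = " << time.get_current_time()
              << ", next dt = " << time.get_next_step_size() << '\n';
  return 0;
}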

    Details of time-stepping

    Since time is marched forward in a discrete manner in our simulations, we need to discuss how we increment time. During time stepping we enter two separate alternating regimes in every step.

One thing to note is that, during the update phase, $dt$ is referred to as either the next or the previous time step size, depending on whether advance_time() has been called yet. The notion of a current time step size is ill-defined. In fact, in the update stage the definition of every variable depends on whether it has been updated yet or not, hence the name of the inconsistent stage.

    The following code snippet shows the code sections for the snapshot stage and the update stage in the context of a complete time-dependent simulation. This code follows the coding conventions incorporated in the tutorial examples. Note that even though this example is written in the format of a for loop, it can equivalently be written as a while or do while loop (as shown in step-21).

    // pre-processing/setup stage {
    make_grid();
    setup_system();
    @@ -618,8 +618,8 @@

    Set the actual value of the next time step size. By calling this method, we are indicating the next time advance_time() is called, time_step_size is to be used to advance the simulation time.

Note
The difference between set_next_step_size() and set_desired_next_step_size() is that the former uses the provided $dt$ exactly without any adjustment, but produces an error (in debug mode) if $dt$ is not in the acceptable range. Generally, set_desired_next_step_size() is the preferred method because it can adjust the $dt$ intelligently, based on $T_{\text{end}}$.

Precondition
$0 < dt \le T_{\text{end}} - t$.

    Definition at line 70 of file discrete_time.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html 2024-11-15 06:44:08.279482671 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDoFHandler.html 2024-11-15 06:44:08.279482671 +0000 @@ -433,7 +433,7 @@

    Detailed Description

    template<int dim, int spacedim = dim>
class DoFHandler< dim, spacedim >

Given a triangulation and a description of a finite element, this class enumerates degrees of freedom on all vertices, edges, faces, and cells of the triangulation. As a result, it also provides a basis for a discrete space $V_h$ whose elements are finite element functions defined on each cell by a FiniteElement object. This class satisfies the MeshType concept requirements.

    It is first used in the step-2 tutorial program.

    For each 0d, 1d, 2d, and 3d subobject, this class stores a list of the indices of degrees of freedom defined on this DoFHandler. These indices refer to the unconstrained degrees of freedom, i.e. constrained degrees of freedom are numbered in the same way as unconstrained ones, and are only later eliminated. This leads to the fact that indices in global vectors and matrices also refer to all degrees of freedom and some kind of condensation is needed to restrict the systems of equations to the unconstrained degrees of freedom only. The actual layout of storage of the indices is described in the internal::DoFHandlerImplementation::DoFLevel class documentation.

    The class offers iterators to traverse all cells, in much the same way as the Triangulation class does. Using the begin() and end() functions (and companions, like begin_active()), one can obtain iterators to walk over cells, and query the degree of freedom structures as well as the triangulation data. These iterators are built on top of those of the Triangulation class, but offer the additional information on degrees of freedom functionality compared to pure triangulation iterators. The order in which dof iterators are presented by the ++ and -- operators is the same as that for the corresponding iterators traversing the triangulation on which this DoFHandler is constructed.
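A minimal sketch of this iterator interface (it assumes a DoFHandler dof_handler on which distribute_dofs() has already been called with a finite element fe):

// Walk over all active cells and collect their global DoF indices,
// e.g., as a first step towards building a sparsity pattern.
std::vector<types::global_dof_index> local_dof_indices(fe.n_dofs_per_cell());
for (const auto &cell : dof_handler.active_cell_iterators())
  {
    cell->get_dof_indices(local_dof_indices);
    // ... use local_dof_indices ...
  }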

    @@ -450,7 +450,7 @@

    Like many other classes in deal.II, the DoFHandler class can stream its contents to an archive using BOOST's serialization facilities. The data so stored can later be retrieved again from the archive to restore the contents of this object. This facility is frequently used to save the state of a program to disk for possible later resurrection, often in the context of checkpoint/restart strategies for long running computations or on computers that aren't very reliable (e.g. on very large clusters where individual nodes occasionally fail and then bring down an entire MPI job).

    The model for doing so is similar for the DoFHandler class as it is for the Triangulation class (see the section in the general documentation of that class). In particular, the load() function does not exactly restore the same state as was stored previously using the save() function. Rather, the function assumes that you load data into a DoFHandler object that is already associated with a triangulation that has a content that matches the one that was used when the data was saved. Likewise, the load() function assumes that the current object is already associated with a finite element object that matches the one that was associated with it when data was saved; the latter can be achieved by calling DoFHandler::distribute_dofs() using the same kind of finite element before re-loading data from the serialization archive.
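A hedged sketch of that save/restore cycle using BOOST text archives (the file name is arbitrary; dof_handler and fe are assumed to be set up as described, and error handling is omitted):

// Save the enumerated degrees of freedom to disk.
{
  std::ofstream output("dof_handler.txt");
  boost::archive::text_oarchive archive(output);
  archive << dof_handler;
}

// Later, on a triangulation with matching content: re-associate the
// same finite element first, then load the stored state.
{
  dof_handler.distribute_dofs(fe);
  std::ifstream input("dof_handler.txt");
  boost::archive::text_iarchive archive(input);
  archive >> dof_handler;
}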

    hp-adaptive finite element methods

Instead of only using one particular FiniteElement on all cells, this class also allows for an enumeration of degrees of freedom on different finite elements on every cell. To this end, one assigns an active_fe_index to every cell that indicates which element within a collection of finite elements (represented by an object of type hp::FECollection) is the one that lives on this cell. The class then enumerates the degrees of freedom associated with these finite elements on each cell of a triangulation and, if possible, identifies degrees of freedom at the interfaces of cells if they match. If neighboring cells have degrees of freedom along the common interface that do not immediately match (for example, if you have $Q_2$ and $Q_3$ elements meeting at a common face), then one needs to compute constraints to ensure that the resulting finite element space on the mesh remains conforming.

    The whole process of working with objects of this type is explained in step-27. Many of the algorithms this class implements are described in the hp-paper.

    Active FE indices and their behavior under mesh refinement

    The typical workflow for using this class is to create a mesh, assign an active FE index to every active cell, call DoFHandler::distribute_dofs(), and then assemble a linear system and solve a problem on this finite element space.
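A minimal sketch of that workflow for the hp-case (a triangulation tria is assumed to exist; which cells get which element is an arbitrary choice here):

// A collection with two Lagrange elements of different degree.
hp::FECollection<dim> fe_collection;
fe_collection.push_back(FE_Q<dim>(1));
fe_collection.push_back(FE_Q<dim>(2));

DoFHandler<dim> dof_handler(tria);

// Assign an element to every active cell, then enumerate the DoFs.
for (const auto &cell : dof_handler.active_cell_iterators())
  cell->set_active_fe_index(cell->at_boundary() ? 1 : 0);
dof_handler.distribute_dofs(fe_collection);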

    @@ -999,7 +999,7 @@
Go through the triangulation and "distribute" the degrees of freedom needed for the given finite element. "Distributing" degrees of freedom involves allocating memory to store the indices on all entities on which degrees of freedom can be located (e.g., vertices, edges, faces, etc.) and to then enumerate all degrees of freedom. In other words, while the mesh and the finite element object by themselves simply define a finite element space $V_h$, the process of distributing degrees of freedom makes sure that there is a basis for this space and that the shape functions of this basis are enumerated in an indexable, predictable way.

    The exact order in which degrees of freedom on a mesh are ordered, i.e., the order in which basis functions of the finite element space are enumerated, is something that deal.II treats as an implementation detail. By and large, degrees of freedom are enumerated in the same order in which we traverse cells, but you should not rely on any specific numbering. In contrast, if you want a particular ordering, use the functions in namespace DoFRenumbering.
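For example, requesting a bandwidth-reducing Cuthill-McKee ordering is a single call after distribute_dofs() (one of several renumbering functions in that namespace):

// Renumber all degrees of freedom with the Cuthill-McKee algorithm.
DoFRenumbering::Cuthill_McKee(dof_handler);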

    This function is first discussed in the introduction to the step-2 tutorial program.

    Note
    This function makes a copy of the finite element given as argument, and stores it as a member variable, similarly to the above function set_fe().
    /usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html 2024-11-15 06:44:08.319483029 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classDynamicSparsityPattern.html 2024-11-15 06:44:08.319483029 +0000 @@ -1119,7 +1119,7 @@
Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix.
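A small sketch of this function on a hand-filled pattern (the indices are arbitrary illustration data):

// A 5x5 pattern: diagonal entries plus one far off-diagonal entry.
DynamicSparsityPattern dsp(5, 5);
for (unsigned int i = 0; i < 5; ++i)
  dsp.add(i, i);
dsp.add(0, 3);

// |0-3| = 3 is now the largest distance from the diagonal.
Assert(dsp.bandwidth() == 3, ExcInternalError());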

    Definition at line 566 of file dynamic_sparsity_pattern.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html 2024-11-15 06:44:08.343483244 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classEigenInverse.html 2024-11-15 06:44:08.343483244 +0000 @@ -204,7 +204,7 @@
    template<typename VectorType = Vector<double>>
    class EigenInverse< VectorType >

Inverse iteration (Wielandt) for eigenvalue computations.

This class implements an adaptive version of the inverse iteration by Wielandt.

There are two choices for the stopping criterion: by default, the norm of the residual $A x - l x$ is computed. Since this might not converge to zero for non-symmetric matrices with non-trivial Jordan blocks, it can be replaced by checking the difference of successive eigenvalues. Use AdditionalData::use_residual for switching between these options.

Usually, the initial guess entering this method is updated after each step, replacing it with the new approximation of the eigenvalue. Using a parameter AdditionalData::relaxation between 0 and 1, this update can be damped. With relaxation parameter 0, no update is performed. This damping allows for slower adaptation of the shift value to make sure that the method converges to the eigenvalue closest to the initial guess. This can be aided by the parameter AdditionalData::start_adaption, which indicates the first iteration step in which the shift value should be adapted.
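A hedged usage sketch (the matrix A is assumed to be an already assembled deal.II matrix; the control parameters are arbitrary):

SolverControl control(1000, 1e-10);
GrowingVectorMemory<Vector<double>> memory;
EigenInverse<Vector<double>> inverse_iteration(control, memory);

double eigenvalue = 1.0; // initial guess, also used as the shift
Vector<double> x(A.n());
x.add(1.); // nonzero start vector

// On return, 'eigenvalue' and 'x' approximate the eigenpair closest
// to the initial guess.
inverse_iteration.solve(eigenvalue, A, x);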

    Definition at line 128 of file eigen.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html 2024-11-15 06:44:08.367483458 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classEigenPower.html 2024-11-15 06:44:08.367483458 +0000 @@ -203,7 +203,7 @@

    Detailed Description

    template<typename VectorType = Vector<double>>
    class EigenPower< VectorType >

    Power method (von Mises) for eigenvalue computations.

This method determines the largest eigenvalue of a matrix by applying increasing powers of this matrix to a vector. If there is an eigenvalue $l$ with dominant absolute value, the iteration vectors will become aligned to its eigenspace and $Ax = lx$.

A shift parameter allows shifting the spectrum, so it is possible to compute the smallest eigenvalue, too.

    Convergence of this method is known to be slow.
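A hedged sketch mirroring the inverse-iteration example above; the shift member of AdditionalData moves the spectrum (the matrix A is assumed again):

SolverControl control(1000, 1e-10);
GrowingVectorMemory<Vector<double>> memory;

// A negative shift can make the smallest eigenvalue of A the dominant
// one of the shifted operator, so the power method converges to it.
EigenPower<Vector<double>>::AdditionalData data;
data.shift = -2.0;
EigenPower<Vector<double>> power_method(control, memory, data);

double eigenvalue = 0.;
Vector<double> x(A.n());
x.add(1.); // nonzero start vector

power_method.solve(eigenvalue, A, x);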

    /usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html 2024-11-15 06:44:08.407483815 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classEllipticalManifold.html 2024-11-15 06:44:08.407483815 +0000 @@ -233,15 +233,15 @@
    template<int dim, int spacedim = dim>
    class EllipticalManifold< dim, spacedim >

    Elliptical manifold description derived from ChartManifold. More information on the elliptical coordinate system can be found at Wikipedia .

    This is based on the definition of elliptic coordinates $(u,v)$

\[
  \left\lbrace\begin{aligned}
  x &=  x_0 + c \cosh(u) \cos(v) \\
  y &=  y_0 + c \sinh(u) \sin(v)
  \end{aligned}\right.
\]

in which $(x_0,y_0)$ are coordinates of the center of the Cartesian system.

The current implementation uses coordinates $(c,v)$, instead of $(u,v)$, and fixes $u$ according to a given eccentricity. Therefore, this choice of coordinates generates an elliptical manifold characterized by a constant eccentricity: $e=\frac{1}{\cosh(u)}$, with $e\in\left]0,1\right[$.

    The constructor of this class will throw an exception if both dim and spacedim are different from two.

    This manifold can be used to produce hyper_shells with elliptical curvature. As an example, the test elliptical_manifold_01 produces the following triangulation:
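A hedged sketch of attaching such a manifold to a grid (the constructor arguments follow the parameter list below; the numeric values are arbitrary):

Triangulation<2> tria;
GridGenerator::hyper_shell(tria, Point<2>(), 0.5, 1.5);

// Major axis along x, eccentricity 0.75, centered at the origin.
Tensor<1, 2> major_axis;
major_axis[0] = 1.;

tria.set_all_manifold_ids(0);
tria.set_manifold(0, EllipticalManifold<2>(Point<2>(), major_axis, 0.75));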

@@ -352,7 +352,7 @@
center: Center of the manifold.
major_axis_direction: Direction of the major axis of the manifold.
eccentricity: Eccentricity of the manifold $e\in\left]0,1\right[$.
    @@ -489,7 +489,7 @@

Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -614,7 +614,7 @@

    Return the periodicity associated with the submanifold.

For $\text{dim}=2$ and $\text{spacedim}=2$, the first coordinate is non-periodic, while the second coordinate has a periodicity of $2\pi$.

    Definition at line 1221 of file manifold_lib.cc.

    @@ -830,7 +830,7 @@
Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -862,24 +862,24 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
  \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
\\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                           -F^{-1}(\mathbf x_1)\right]
\end{align*}

In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
  \mathbf s(t) &= F(\zeta(t))
\\          &= F(\xi_1 +  t (\xi_2-\xi_1))
\\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                   -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
  \mathbf s'(0) &=
    \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                       + t\left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right]\right)
    \right|_{t=0}
\\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                   \left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right].
\end{align*}

This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFECouplingValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFECouplingValues.html 2024-11-15 06:44:08.447484172 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFECouplingValues.html 2024-11-15 06:44:08.447484172 +0000 @@ -178,44 +178,44 @@ class FECouplingValues< dim1, dim2, spacedim >

    FECouplingValues is a class that facilitates the integration of finite element data between two different finite element objects, possibly living on different grids, and with possibly different topological dimensions (i.e., cells, faces, edges, and any combination thereof).

    This class provides a way to simplify the implementation of the following abstract operation:

\[
  \int_{T_1} \int_{T_2} K(x_1, x_2) \phi^1_i(x_1) \phi^2_j(x_2) \, dT_1 \, dT_2
\]

    for three different types of Kernels $K$:

  • $K(x_1, x_2)$ is a non-singular Kernel function, for example, it is a function of positive powers $\alpha$ of the distance between the quadrature points $|x_1-x_2|^\alpha$;
  • $K(x_1, x_2)$ is a singular Kernel function, for example, it is a function of negative powers $\alpha$ of the distance between the quadrature points $|x_1-x_2|^\alpha$;
  • $K(x_1, x_2)$ is a Dirac delta distribution $\delta(x_1-x_2)$, such that the integral above is actually a single integral over the intersection of the two sets $T_1$ and $T_2$.

    For the first case, one may think that the only natural way to proceed is to compute the double integral by simply nesting two loops:

\[
  \int_{T_1} \int_{T_2} K(x_1, x_2) \phi^1_i(x_1) \phi^2_j(x_2) \, dT_1 \, dT_2
  \approx \sum_{q_1} \sum_{q_2} K(x_1^{q_1}, x_2^{q_2}) \phi^1_i(x_1^{q_1})
  \phi^2_j(x_2^{q_2}) w_1^{q_1} w_2^{q_2},
\]

where $x_1^{q_1}$ and $x_2^{q_2}$ are the quadrature points in $T_1$ and $T_2$ respectively, and $w_1^{q_1}$ and $w_2^{q_2}$ are the corresponding quadrature weights.

This, however, is not the only way to proceed. In fact, such an integral can be rewritten as a single loop over corresponding elements of two arrays of points with the same length that can be thought of as a single quadrature rule on the set $T_1\times T_2$. For singular kernels, for example, this is often the only way to proceed, since the quadrature formula on $T_1\times T_2$ is usually not written as a tensor product quadrature formula, and one needs to build a custom quadrature formula for this purpose.

    This class allows one to treat the three cases above in the same way, and to approximate the integral as follows:

\[
  \int_{T_1} \int_{T_2} K(x_1, x_2) \phi^1_i(x_1) \phi^2_j(x_2) \, dT_1 \, dT_2
  \approx \sum_{i=1}^{N_q} K(x_1^{i}, x_2^{i}) \phi^1_i(x_1^{i})
  \phi^2_j(x_2^{i}) w_1^{i} w_2^i,
\]
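The difference between the two summation strategies can be sketched in a few lines of plain C++; this toy program (all names, the kernel, and the basis functions are hypothetical stand-ins, not the FECouplingValues API) evaluates the same double integral both ways:

#include <cmath>
#include <vector>

// Toy stand-ins for the kernel and the two basis functions.
double kernel(double a, double b) { return std::abs(a - b); }
double phi1(double x) { return x; }       // basis function on T_1
double phi2(double x) { return 1. - x; }  // basis function on T_2

int main()
{
  // Two 2-point rules with weights 0.5 on T_1 and T_2.
  const std::vector<double> x1 = {0.25, 0.75}, w1 = {0.5, 0.5};
  const std::vector<double> x2 = {0.25, 0.75}, w2 = {0.5, 0.5};

  // Tensor-product (double-loop) form: every (q1, q2) combination.
  double sum_tensor = 0.;
  for (std::size_t q1 = 0; q1 < x1.size(); ++q1)
    for (std::size_t q2 = 0; q2 < x2.size(); ++q2)
      sum_tensor += kernel(x1[q1], x2[q2]) * phi1(x1[q1]) * phi2(x2[q2]) *
                    w1[q1] * w2[q2];

  // Paired (single-loop) form: flatten the same rule into one array of
  // point/weight pairs on T_1 x T_2 and sum once. A singular kernel
  // would get a custom paired rule here instead of the flattened one.
  std::vector<double> y1, y2, v1, v2;
  for (std::size_t q1 = 0; q1 < x1.size(); ++q1)
    for (std::size_t q2 = 0; q2 < x2.size(); ++q2)
      {
        y1.push_back(x1[q1]);
        y2.push_back(x2[q2]);
        v1.push_back(w1[q1]);
        v2.push_back(w2[q2]);
      }

  double sum_paired = 0.;
  for (std::size_t q = 0; q < y1.size(); ++q)
    sum_paired += kernel(y1[q], y2[q]) * phi1(y1[q]) * phi2(y2[q]) *
                  v1[q] * v2[q];

  // The two sums agree by construction.
  return (std::abs(sum_tensor - sum_paired) < 1e-12) ? 0 : 1;
}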

Since the triple of objects $(\{q\}, \{w\}, \{\phi\})$ is usually provided by a class derived from the FEValuesBase class, this is the type that the class needs at construction time. $T_1$ and $T_2$ can be two arbitrary cells, faces, or edges belonging to possibly different meshes (or to meshes with different topological dimensions), and $\phi^1_i$ and $\phi^2_j$ are basis functions defined on $T_1$ and $T_2$, respectively.

The case of the Dirac distribution is when $T_1$ and $T_2$ correspond to the common face of two neighboring cells. In this case, this class provides functionality similar to the FEInterfaceValues class, and gives you a way to access values of basis functions on the neighboring cells, as well as their gradients and Hessians, in a unified fashion, on the face.

    Similarly, this class can be used to couple bulk and surface meshes across the faces of the bulk mesh. In this case, the two FEValuesBase objects will have different topological dimension (i.e., one will be a cell in a co-dimension one triangulation, and the other a face of a bulk grid with co-dimension zero), and the QuadratureCouplingType argument is usually chosen to be QuadratureCouplingType::reorder, since the quadrature points of the two different FEValuesBase objects are not necessarily generated with the same ordering.

The type of integral to compute is controlled by the QuadratureCouplingType argument (see the documentation of that enum class for more details), while the type of degrees-of-freedom coupling is controlled by the DoFCouplingType argument (see the documentation of that enum class for more details).

As an example usage of this class, consider a bilinear form of the form:

\[
  \int_{T_1} \int_{T_2} K_1(x_1, x_2) v_i(x_1) u_j(x_2) \, dT_1 \, dT_2 +
  \int_{T_1} \int_{T_2} K_2(x_1, x_2) p_i(x_1) q_j(x_2) \, dT_1 \, dT_2
\]

where the finite dimensional space has two scalar components. We indicate with $v_i$ and $p_i$ the trial functions, and with $u_j$ and $q_j$ the corresponding test functions. $K_1$ and $K_2$ are coupling kernels: such a formulation is used, for example, to write the bilinear forms of Galerkin boundary element methods.

    The corresponding implementation would look like the following:

    ... // double loop over cells that yields cell_1 and cell_2
    @@ -341,9 +341,9 @@

    Construct the FECouplingValues with two arbitrary FEValuesBase objects. This class assumes that the FEValuesBase objects that are given at construction time are initialized and ready to use (i.e., that you have called the reinit() function on them before calling this constructor).

    Notice that the actual renumbering of the degrees of freedom and quadrature points is done at construction time, or upon calling the reinit() function. If you change the underlying FEValuesBase objects after construction, you must call the reinit() function to update the renumbering. This may or may not be necessary, depending on the type of coupling that you are using.

    This really depends on the application and on the specific type of coupling. For example, for volume/volume coupling, i.e., for operators with non-local and non-singular kernels of type

\[
  \int_K \int_T f(\phi_i(x)-\phi_j(y), x-y) \, dx \, dy
\]

    you may initialize FECouplingValues once, and just reinit the underlying FEValuesBase objects on different cells K and T, without the need to recompute the coupling (i.e., the numbering is always the same, and nothing differs from what happened in the first call).

For cell/surface coupling, the same cell may couple with different faces, so the renumbering must really be computed from scratch for each pair of FEValuesBase objects; reinitializing the underlying cells and faces will make the renumbering itself invalid, and FECouplingValues must be reinitialized (or constructed from scratch) after calling fe_values_1.reinit(K) and fe_values_2.reinit(T).

    @@ -393,9 +393,9 @@

    Reinitialize the FECouplingValues with two arbitrary FEValuesBase objects. The FEValuesBase objects must be initialized and ready to use, i.e., you must have called the reinit() function on them before calling this method.

    This method computes the actual renumbering of the degrees of freedom and quadrature points. If you change the underlying FEValuesBase objects after calling this method, you may need to call the reinit() function to update the renumbering. This may or may not be necessary, depending on the type of coupling that you are using.

    This really depends on the application and on the specific type of coupling. For example, for volume/volume coupling, i.e., for operators with non-local and non-singular kernels of type

\[
  \int_K \int_T f(\phi_i(x)-\phi_j(y), x-y) \, dx \, dy
\]

    you may initialize FECouplingValues once, and just reinit the underlying FEValuesBase objects on different cells K and T, without the need to recompute the coupling (i.e., the numbering is always the same, and nothing differs from what happened in the first call).

For cell/surface coupling, the same cell may couple with different faces, so the renumbering must really be computed from scratch for each pair of FEValuesBase objects; reinitializing the underlying cells and faces will make the renumbering itself invalid, and FECouplingValues must be reinitialized (or constructed from scratch) after calling fe_values_1.reinit(K) and fe_values_2.reinit(T).

    @@ -492,8 +492,8 @@
x1: The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
Return the two quadrature points in real space at the given quadrature point index, corresponding to a quadrature point in the set $T_1\times T_2$.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_quadrature_points flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html 2024-11-15 06:44:08.547485066 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluation.html 2024-11-15 06:44:08.547485066 +0000 @@ -464,7 +464,7 @@

    Likewise, a gradient of the finite element solution represented by vector can be interpolated to the quadrature points by fe_eval.get_gradient(q). The combination of read_dof_values(), evaluate() and get_value() is similar to what FEValues::get_function_values or FEValues::get_function_gradients does, but it is in general much faster because it makes use of the tensor product, see the description of the evaluation routines below, and can do this operation for several cells at once through vectorization.

The second class of tasks done by FEEvaluation are integration tasks for right hand sides. In finite element computations, these typically consist of multiplying a quantity on quadrature points (a function value, or a field interpolated by the finite element space itself) by a set of test functions and integrating over the cell through summation of the values in each quadrature point, multiplied by the quadrature weight and the Jacobian determinant of the transformation. If a generic Function object is given and we want to compute $v_i = \int_\Omega \varphi_i f dx$, this is done by the following cell-wise integration:

    FEEvaluation<dim,fe_degree> fe_eval(matrix_free);
    Function<dim> &function = ...;
    for (unsigned int cell_index = cell_range.first;
         cell_index < cell_range.second; ++cell_index)
      { /* reinit fe_eval, submit the function values at each quadrature point, integrate, and write the local result into the global vector */ }
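    A fuller sketch of that elided loop body, assuming the scalar case with double numbers and a result vector named dst (an assumed name); the per-lane loop is needed because each quadrature point carries data for several cells at once through vectorization:

    fe_eval.reinit(cell_index);
    for (unsigned int q = 0; q < fe_eval.n_q_points; ++q)
      {
        // Evaluate the scalar Function lane by lane at the quadrature points.
        const Point<dim, VectorizedArray<double>> p_vect = fe_eval.quadrature_point(q);
        VectorizedArray<double> f_value = 0.0;
        for (unsigned int v = 0; v < VectorizedArray<double>::size(); ++v)
          {
            Point<dim> p;
            for (unsigned int d = 0; d < dim; ++d)
              p[d] = p_vect[d][v];
            f_value[v] = function.value(p);
          }
        fe_eval.submit_value(f_value, q); // stores f(x_q) * JxW(q) for integrate()
      }
    fe_eval.integrate(EvaluationFlags::values);  // multiply by test functions and sum
    fe_eval.distribute_local_to_global(dst);     // add the local result into dst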
    @@ -1790,8 +1790,8 @@
    Return the derivative of a finite element function interpolated to the quadrature point with index q_point after a call to evaluate(EvaluationFlags::gradients), in the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf x_q)$.

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.
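    In code, that equivalence reads as follows; fe_face_eval stands for an already evaluated FEFaceEvaluation object (an assumed name):

    // Both lines compute grad u(x_q) . n(x_q) at quadrature point q; the first
    // uses the more efficient internal representation mentioned above.
    const auto dudn     = fe_face_eval.get_normal_derivative(q);
    const auto dudn_alt = fe_face_eval.get_gradient(q) * fe_face_eval.normal_vector(q);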

    @@ -2115,7 +2115,7 @@
    Return the curl of the vector field, $\nabla \times v$, interpolated to the quadrature point index after calling evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim) in 2 and 3 dimensions.
    @@ -2487,8 +2487,8 @@
    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping from the unit to the real cell, defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm T} \hat{\nabla} u_h$.
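    Written out, the covariant transformation that get_gradient() applies is:

\[
\nabla u_h(\mathbf x_q) = J^{-\mathrm T}\,\hat{\nabla} u_h(\hat{\mathbf x}_q),
\qquad
J_{ij} = \frac{\partial x_i}{\partial \hat x_j},
\qquad
\bigl(J^{-\mathrm T}\bigr)_{ij} = \frac{\partial \hat x_j}{\partial x_i}.
\]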

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html 2024-11-15 06:44:08.631485816 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationBase.html 2024-11-15 06:44:08.631485816 +0000 @@ -1131,8 +1131,8 @@
    Return the derivative of a finite element function interpolated to the quadrature point with index q_point after a call to evaluate(EvaluationFlags::gradients), in the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf x_q)$.

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    @@ -1402,7 +1402,7 @@
    Return the curl of the vector field, $\nabla \times v$, interpolated to the quadrature point index after calling evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim) in 2 and 3 dimensions.
    @@ -1801,8 +1801,8 @@
    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping from the unit to the real cell, defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html 2024-11-15 06:44:08.691486352 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEEvaluationData.html 2024-11-15 06:44:08.691486352 +0000 @@ -785,8 +785,8 @@
    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping from the unit to the real cell, defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html 2024-11-15 06:44:08.791487245 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceEvaluation.html 2024-11-15 06:44:08.791487245 +0000 @@ -1717,8 +1717,8 @@
    Return the derivative of a finite element function interpolated to the quadrature point with index q_point after a call to evaluate(EvaluationFlags::gradients), in the direction normal to the face: $\boldsymbol \nabla u(\mathbf x_q) \cdot \mathbf n(\mathbf x_q)$.

    This call is equivalent to calling get_gradient() * normal_vector() but will use a more efficient internal representation of data.

    @@ -2078,7 +2078,7 @@
    Return the curl of the vector field, $\nabla \times v$, interpolated to the quadrature point index after calling evaluate(EvaluationFlags::gradients).

    Note
    Only available for the vector-valued case (n_components == dim) in 2 and 3 dimensions.
    @@ -2472,8 +2472,8 @@
    Return the inverse and transposed version $J^{-\mathrm T}$ of the Jacobian of the mapping from the unit to the real cell, defined as $J_{ij} = d x_i / d\hat x_j$. The $(i,j)$ entry of the returned tensor contains $d\hat x_j/dx_i$, i.e., columns refer to reference space coordinates and rows to real cell coordinates. Thus, the returned tensor represents a covariant transformation, which is used in the FEEvaluationBase::get_gradient() function to transform the unit cell gradients to gradients on the real cell by a multiplication $J^{-\mathrm T} \hat{\nabla} u_h$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html 2024-11-15 06:44:08.947488638 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValues.html 2024-11-15 06:44:08.947488638 +0000 @@ -963,7 +963,7 @@

    If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

    Parameters
    i: Number of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_point: Number of the quadrature point at which the function is to be evaluated.
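    A short usage sketch for these two functions; the names fe_face_values, cell, and face_no are illustrative assumptions:

    // Hedged sketch: loop over shape functions and face quadrature points.
    double sum = 0;
    fe_face_values.reinit(cell, face_no);
    for (unsigned int i = 0; i < fe_face_values.dofs_per_cell; ++i)
      for (unsigned int q = 0; q < fe_face_values.n_quadrature_points; ++q)
        {
          sum += fe_face_values.shape_value(i, q); // primitive elements only
          // For non-primitive elements, query a single vector component instead:
          // fe_face_values.shape_value_component(i, q, component);
        }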
    @@ -1004,7 +1004,7 @@

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

    Parameters
    i: Number of the shape function $\varphi_i$ to be evaluated.
    q_point: Number of the quadrature point at which the function is to be evaluated.
    component: Vector component to be evaluated.
    @@ -1043,7 +1043,7 @@

    The same holds for the arguments of this function as for the shape_value() function.

    Parameters
    i: Number of the shape function $\varphi_i$ to be evaluated.
    q_point: Number of the quadrature point at which the function is to be evaluated.
    @@ -1251,11 +1251,11 @@
    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] values: The values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 716 of file fe_values_base.cc.
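    For illustration, the typical call pattern looks like this; solution, cell, and face_no are assumed names:

    // Hedged sketch: values of a scalar FE field at the face quadrature points.
    std::vector<double> values(fe_face_values.n_quadrature_points);
    fe_face_values.reinit(cell, face_no);
    fe_face_values.get_function_values(solution, values);
    // values[q] now holds the field value at the q-th quadrature point.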

    @@ -1289,7 +1289,7 @@

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 763 of file fe_values_base.cc.

    @@ -1466,11 +1466,11 @@
    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] gradients: The gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
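    The analogous call for gradients, as a sketch (again with solution as an assumed vector name):

    // Hedged sketch: real-space gradients of a scalar FE field on a face.
    std::vector<Tensor<1, dim>> gradients(fe_face_values.n_quadrature_points);
    fe_face_values.get_function_gradients(solution, gradients);
    // gradients[q][d] is the derivative in coordinate direction d at point q.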
    @@ -1506,7 +1506,7 @@

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 898 of file fe_values_base.cc.

    @@ -1625,11 +1625,11 @@
    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] hessians: The Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1670,7 +1670,7 @@

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1001 of file fe_values_base.cc.

    @@ -1789,11 +1789,11 @@
    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] laplacians: The Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
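    Stated as a formula, the trace relation mentioned above reads:

\[
\texttt{laplacians[q]} = \operatorname{trace}(\texttt{hessians[q]})
= \sum_{d} \frac{\partial^2 u_h}{\partial x_d^2}(\mathbf x_q).
\]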
    @@ -1831,7 +1831,7 @@

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1994,11 +1994,11 @@
    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] third_derivatives: The third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -2039,7 +2039,7 @@

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1234 of file fe_values_base.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html 2024-11-15 06:44:09.039489460 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEFaceValuesBase.html 2024-11-15 06:44:09.039489460 +0000 @@ -667,7 +667,7 @@

    If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

    Parameters
    i: Number of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_point: Number of the quadrature point at which the function is to be evaluated.
    @@ -706,7 +706,7 @@

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

    Parameters
    i: Number of the shape function $\varphi_i$ to be evaluated.
    q_point: Number of the quadrature point at which the function is to be evaluated.
    component: Vector component to be evaluated.
    @@ -743,7 +743,7 @@

    The same holds for the arguments of this function as for the shape_value() function.

    Parameters
    i: Number of the shape function $\varphi_i$ to be evaluated.
    q_point: Number of the quadrature point at which the function is to be evaluated.
    @@ -937,11 +937,11 @@
    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] values: The values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 487 of file fe_values_base.cc.

    @@ -971,7 +971,7 @@

    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 505 of file fe_values_base.cc.

    @@ -1132,11 +1132,11 @@
    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] gradients: The gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1168,7 +1168,7 @@

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 683 of file fe_values_base.cc.

    @@ -1275,11 +1275,11 @@
    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] hessians: The Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    Postcondition
    hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1316,7 +1316,7 @@

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 786 of file fe_values_base.cc.

    @@ -1423,11 +1423,11 @@
    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] laplacians: The Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
    Postcondition
    laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    @@ -1461,7 +1461,7 @@

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1608,11 +1608,11 @@
    Parameters
    [in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] third_derivatives: The third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
    Postcondition
    third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1649,7 +1649,7 @@

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1006 of file fe_values_base.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html 2024-11-15 06:44:09.095489960 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceValues.html 2024-11-15 06:44:09.095489960 +0000 @@ -499,9 +499,9 @@
  • If the q_index and mapping_index arguments to this function are explicitly specified (rather than leaving them at their default values), then these indices will be used to select which element of the hp::QCollection and hp::MappingCollection passed to the constructor should serve as the quadrature and mapping to be used.
  • If one of these arguments is left at its default value, then the function will need to choose a quadrature and/or mapping that is appropriate for the two finite element spaces used on the two cells adjacent to the current interface. As the first choice, if the quadrature or mapping collection we are considering has only one element, then that is clearly the one that should be used.
  • If the quadrature or mapping collection have multiple elements, then we need to dig further. For quadrature objects, we can compare whether the two quadrature objects that correspond to the active_fe_index values of the two adjacent cells are identical (i.e., have quadrature points at the same locations, and have the same weights). If this is so, then it does not matter which one of the two we take, and we choose one or the other.
  • -
  • If this has still not helped, we try to find out which of the two finite element spaces on the two adjacent cells is "larger" (say, if you had used $Q_2$ and $Q_4$ elements on the two adjacent cells, then the $Q_4$ element is the larger one); the determination of which space is "larger" is made using the hp::FECollection::find_dominated_fe() function, which is not necessarily intended for this kind of query, but yields a result that serves just fine for our purposes here. We then operate on the assumption that the quadrature object associated with the "larger" of the two spaces is the appropriate one to use for the face that separates these two spaces.
    • If this function returns that one of the two elements in question is dominated by the other, then presumably it is the "larger" one, and we take the quadrature formula and mapping that correspond to this "larger" element. For example, for the $Q_2$ element mentioned above, one would generally use a QGauss(3) quadrature formula, whereas for the $Q_4$ element, one would use QGauss(5). To integrate jump and average terms on the interface between cells using these two elements, QGauss(5) is appropriate. Because, typically, people will order elements in the hp::FECollection in the same order as the quadrature and mapping objects in hp::QCollection and hp::MappingCollection, this function will use the index of the "larger" element in the hp::FECollection to also index into the hp::QCollection and hp::MappingCollection to retrieve quadrature and mapping objects appropriate for the current face (see the sketch after this list).
    • There are cases where neither element dominates the other. For example, if one uses $Q_2\times Q_1$ and $Q_1\times Q_2$ elements on neighboring cells, neither of the two spaces dominates the other – or, in the context of the current function, neither space is "larger" than the other. In that case, there is no way for the current function to determine which quadrature and mapping objects associated with the two elements are the appropriate ones. If that happens, you will get an error – and the only way to avoid the error is to explicitly specify for these interfaces which quadrature and mapping objects you want to use, by providing non-default values for the q_index and mapping_index arguments to this function.
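
    A minimal sketch of the dominance query described above; hp::FECollection::find_dominated_fe() is the function named in the text, while the two FE_Q spaces and the face codimension are illustrative:

      #include <deal.II/fe/fe_q.h>
      #include <deal.II/hp/fe_collection.h>

      hp::FECollection<dim> fe_collection;
      fe_collection.push_back(FE_Q<dim>(2)); // index 0
      fe_collection.push_back(FE_Q<dim>(4)); // index 1

      // On a face (codim = 1), ask which space is dominated by the other.
      // For Q2 vs Q4 this returns 1: the Q4 space is the "larger" one,
      // and its index would then also select the matching entries (e.g.
      // QGauss(5)) of hp::QCollection and hp::MappingCollection.
      const unsigned int larger =
        fe_collection.find_dominated_fe({0, 1}, /*codim=*/1);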
  • Mapped quadrature weight. This value equals the mapped surface element times the weight of the quadrature point.

    You can think of the quantity returned by this function as the surface element $ds$ in the integral that we implement here by quadrature.

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_JxW_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    Return the jump $\jump{u}=u_{\text{cell0}} - u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    Note that one can define the jump in different ways (the value "there" minus the value "here", or the other way around; both are used in the finite element literature). The definition here uses "value here minus value there", as seen from the first cell.

    If this is a boundary face (at_boundary() returns true), then $\jump{u}=u_{\text{cell0}}$, that is "the value here (minus zero)".

    Note
    The name of the function is supposed to be read as "the jump (singular) in the values (plural: one or two possible values) of the shape function (singular)".
    Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}} - \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    If this is a boundary face (at_boundary() returns true), then $\jump{\nabla u}=\nabla u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the gradients (plural: one or two possible gradients) of the shape function (singular)".
    Return the jump in the Hessian $\jump{\nabla^2 u} = \nabla^2 u_{\text{cell0}} - \nabla^2 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    If this is a boundary face (at_boundary() returns true), then $\jump{\nabla^2 u} = \nabla^2 u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the Hessians (plural: one or two possible values for the derivative) of the shape function (singular)".
    Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3 u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    If this is a boundary face (at_boundary() returns true), then $\jump{\nabla^3 u} = \nabla^3 u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the third derivatives (plural: one or two possible values for the derivative) of the shape function (singular)".
    Return the average $\average{u}=\frac{1}{2}u_{\text{cell0}} + \frac{1}{2}u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    If this is a boundary face (at_boundary() returns true), then $\average{u}=u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the average (singular) of the values (plural: one or two possible values) of the shape function (singular)".
    Return the average of the gradient $\average{\nabla u} = \frac{1}{2}\nabla u_{\text{cell0}} + \frac{1}{2} \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    If this is a boundary face (at_boundary() returns true), then $\average{\nabla u}=\nabla u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the average (singular) of the gradients (plural: one or two possible values for the gradient) of the shape function (singular)".
    Return the average of the Hessian $\average{\nabla^2 u} = \frac{1}{2}\nabla^2 u_{\text{cell0}} + \frac{1}{2} \nabla^2 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of component component.

    If this is a boundary face (at_boundary() returns true), then $\average{\nabla^2 u}=\nabla^2 u_{\text{cell0}}$.

    Note
    The name of the function is supposed to be read as "the average (singular) of the Hessians (plural: one or two possible values for the second derivatives) of the shape function (singular)".
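
    Taken together, these jump and average accessors are what one assembles interface terms from. A minimal sketch of a symmetric interior-penalty face term, assuming fe_iv is an FEInterfaceValues<dim> that has already been reinit()ed on the current interface, penalty is defined elsewhere, and using the accessor names of recent deal.II releases:

      const std::vector<double> &        JxW     = fe_iv.get_JxW_values();
      const std::vector<Tensor<1, dim>> &normals = fe_iv.get_normal_vectors();
      const unsigned int n_dofs = fe_iv.n_current_interface_dofs();

      FullMatrix<double> face_matrix(n_dofs, n_dofs);
      for (unsigned int q = 0; q < JxW.size(); ++q)
        for (unsigned int i = 0; i < n_dofs; ++i)
          for (unsigned int j = 0; j < n_dofs; ++j)
            face_matrix(i, j) +=
              (penalty * fe_iv.jump_in_shape_values(i, q) *
                 fe_iv.jump_in_shape_values(j, q) -
               fe_iv.average_of_shape_gradients(i, q) * normals[q] *
                 fe_iv.jump_in_shape_values(j, q) -
               fe_iv.jump_in_shape_values(i, q) *
                 fe_iv.average_of_shape_gradients(j, q) * normals[q]) *
              JxW[q];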
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html 2024-11-15 06:44:09.131490281 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Scalar.html 2024-11-15 06:44:09.131490281 +0000

    Return the jump $\jump{u}=u_1 - u_2$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the values (plural: one or two possible values) of the shape function (singular)".
    Return the jump of the gradient $\jump{\nabla u}$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the gradients (plural: one or two possible gradients) of the shape function (singular)".
    Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}} - \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the Hessians (plural: one or two possible values for the second derivative) of the shape function (singular)".
    Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3 u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the third derivatives (plural: one or two possible values for the third derivative) of the shape function (singular)".
    Return the average value $\average{u}=\frac{1}{2}(u_1 + u_2)$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the average (singular) of the values (plural: one or two possible values) of the shape function (singular)".
    Return the average of the gradient $\average{\nabla u}$ on the interface for the shape function interface_dof_index in the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the average (singular) of the gradients (plural: one or two possible values of the derivative) of the shape function (singular)".
    Return the average of the Hessian $\average{\nabla^2 u} = \frac{1}{2}\nabla^2 u_{\text{cell0}} + \frac{1}{2} \nabla^2 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the average (singular) in the Hessians (plural: one or two possible values of the second derivative) of the shape function (singular)".

    Return the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The argument here_or_there selects between the value on cell 0 (here, true) and cell 1 (there, false). You can also interpret it as "upstream" (true) and "downstream" (false) as defined by the direction of the normal vector in this quadrature point. If here_or_there is true, the values from the first cell of the interface are used.

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
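
    A minimal sketch of the corresponding call; solution and a reinit()ed FEInterfaceValues<dim> object fe_iv are assumed from context, and get_jump_in_function_values() is the accessor name in recent deal.II releases (an assumption worth checking against the installed version):

      const FEValuesExtractors::Scalar scalar(0);

      // One value per interface quadrature point:
      std::vector<double> jumps(fe_iv.get_JxW_values().size());
      fe_iv[scalar].get_jump_in_function_values(solution, jumps);
      // jumps[q] is now the jump of the field at quadrature point q.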

    Return the jump in the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Return the jump in the gradients of the selected scalar components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Return the jump in the Hessians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Return the jump in the third derivatives of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Return the average of the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Return the average of the gradients of the selected scalar components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Return the average of the Hessians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html 2024-11-15 06:44:09.167490603 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEInterfaceViews_1_1Vector.html 2024-11-15 06:44:09.167490603 +0000

    Return the jump vector $[\mathbf{u}]=\mathbf{u_1} - \mathbf{u_2}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the values (plural: one or two possible values) of the shape function (singular)".
    Return the jump of the gradient (a tensor of rank 2) $\jump{\nabla \mathbf{u}}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the gradients (plural: one or two possible gradients) of the shape function (singular)".
    Return the jump in the gradient $\jump{\nabla u}=\nabla u_{\text{cell0}} - \nabla u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the Hessians (plural: one or two possible values for the second derivative) of the shape function (singular)".
    Return the jump in the third derivative $\jump{\nabla^3 u} = \nabla^3 u_{\text{cell0}} - \nabla^3 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the jump (singular) in the third derivatives (plural: one or two possible values for the third derivative) of the shape function (singular)".
    Return the average vector $\average{\mathbf{u}}=\frac{1}{2}(\mathbf{u_1} + \mathbf{u_2})$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    Note
    The name of the function is supposed to be read as "the average (singular) of the values (plural: one or two possible values) of the shape function (singular)".
    Return the average of the gradient (a tensor of rank 2) $\average{\nabla \mathbf{u}}$ on the interface for the shape function interface_dof_index in the quadrature point q_point.

    Note
    The name of the function is supposed to be read as "the average (singular) of the gradients (plural: one or two possible values of the derivative) of the shape function (singular)".
    Return the average of the Hessian $\average{\nabla^2 u} = \frac{1}{2}\nabla^2 u_{\text{cell0}} + \frac{1}{2} \nabla^2 u_{\text{cell1}}$ on the interface for the shape function interface_dof_index at the quadrature point q_point of the component selected by this view.

    Note
    The name of the function is supposed to be read as "the average (singular) in the Hessians (plural: one or two possible values of the second derivative) of the shape function (singular)".

    Return the values of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The argument here_or_there selects between the value on cell 0 (here, true) and cell 1 (there, false). You can also interpret it as "upstream" (true) and "downstream" (false) as defined by the direction of the normal vector in this quadrature point. If here_or_there is true, the values from the first cell of the interface are used.

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Return the jump in the values of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Return the jump in the gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Return the jump in the Hessians of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Return the jump in the third derivatives of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Return the average of the values of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Return the average of the gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell interface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Return the average of the Hessians of the selected vector component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEInterfaceValues object was called.

    The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html 2024-11-15 06:44:09.311491889 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Fourier.html 2024-11-15 06:44:09.311491889 +0000

    Detailed Description

    template<int dim, int spacedim = dim>
    class FESeries::Fourier< dim, spacedim >

    A class to calculate expansion of a scalar FE (or a single component of vector-valued FE) field into Fourier series on a reference element. The exponential form of the Fourier series is based on completeness and Hermitian orthogonality of the set of exponential functions $ \phi_{\bf k}({\bf x}) = \exp(2 \pi i\, {\bf k} \cdot {\bf x})$. For example in 1d the L2-orthogonality condition reads

    \[
      \int_0^1 \phi_k(x) \phi_l^\ast(x) dx=\delta_{kl}.
    \]

    Note that $ \phi_{\bf k} = \phi_{-\bf k}^\ast $.

    The arbitrary scalar FE field on the reference element can be expanded in the complete orthogonal exponential basis as

    \[
      u({\bf x}) = \sum_{\bf k} c_{\bf k} \phi_{\bf k}({\bf x}).
    \]

    From the orthogonality property of the basis, it follows that

    \[
      c_{\bf k} = \int_{[0,1]^d} u({\bf x}) \phi_{\bf k}^\ast ({\bf x}) d{\bf x}\,.
    \]

    It is these complex-valued expansion coefficients that are calculated by this class. Note that $ u({\bf x}) = \sum_i u_i N_i({\bf x})$, where $ N_i({\bf x}) $ are real-valued FiniteElement shape functions. Consequently $ c_{\bf k} \equiv c_{-\bf k}^\ast $ and we only need to compute $ c_{\bf k} $ for positive indices $ \bf k $.

    Definition at line 89 of file fe_series.h.
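
    The coefficient formula above is easy to illustrate outside the class; a self-contained 1d sketch (deliberately not the class's own API) that approximates $ c_k = \int_0^1 u(x)\, e^{-2\pi i k x}\, dx $ with the midpoint rule:

      #include <cmath>
      #include <complex>

      // Approximate c_k = \int_0^1 u(x) conj(phi_k(x)) dx with
      // phi_k(x) = exp(2 pi i k x), using n midpoint samples.
      std::complex<double> fourier_coefficient(double (*u)(double),
                                               const int          k,
                                               const unsigned int n = 256)
      {
        const double         pi = 3.14159265358979323846;
        std::complex<double> c_k(0.0, 0.0);
        for (unsigned int q = 0; q < n; ++q)
          {
            const double x = (q + 0.5) / n;
            c_k += u(x) *
                   std::exp(std::complex<double>(0.0, -2.0 * pi * k * x)) /
                   static_cast<double>(n);
          }
        return c_k;
      }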

    Member Typedef Documentation

    Angular frequencies $ 2 \pi {\bf k} $.

    Definition at line 195 of file fe_series.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html 2024-11-15 06:44:09.339492139 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFESeries_1_1Legendre.html 2024-11-15 06:44:09.339492139 +0000
    template<int dim, int spacedim = dim>
    class FESeries::Legendre< dim, spacedim >

    A class to calculate expansion of a scalar FE (or a single component of vector-valued FE) field into series of Legendre functions on a reference element.

    Legendre functions are solutions to Legendre's differential equation

    \[
      \frac{d}{dx}\left([1-x^2] \frac{d}{dx} P_n(x)\right) + n[n+1] P_n(x) = 0
    \]

    and can be expressed using Rodrigues' formula

    \[
      P_n(x) = \frac{1}{2^n n!} \frac{d^n}{dx^n}[x^2-1]^n.
    \]

    These polynomials are orthogonal with respect to the $ L^2 $ inner product on the interval $ [-1;1] $

    \[
      \int_{-1}^1 P_m(x) P_n(x) = \frac{2}{2n + 1} \delta_{mn}
    \]

    and are complete. A family of $ L^2 $-orthogonal polynomials on $ [0;1] $ can be constructed via

    \[
      \widetilde P_m = \sqrt{2} P_m(2x-1).
    \]

    An arbitrary scalar FE field on the reference element $ [0;1] $ can be expanded in the complete orthogonal basis as

    \[
      u(x) = \sum_{m} c_m \widetilde P_{m}(x).
    \]

    From the orthogonality property of the basis, it follows that

    \[
      c_m = \frac{2m+1}{2} \int_0^1 u(x) \widetilde P_m(x) dx .
    \]

    This class calculates coefficients $ c_{\bf k} $ using $ dim $-dimensional Legendre polynomials constructed from $ \widetilde P_m(x) $ using a tensor product rule.

    Definition at line 259 of file fe_series.h.
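
    The analogous self-contained 1d illustration for the Legendre coefficients; std::legendre (C++17, <cmath>) evaluates $ P_m $, and the midpoint rule again stands in for the exact integral:

      #include <cmath>

      // Approximate c_m = (2m+1)/2 * \int_0^1 u(x) \widetilde P_m(x) dx
      // with \widetilde P_m(x) = sqrt(2) P_m(2x - 1).
      double legendre_coefficient(double (*u)(double),
                                  const unsigned int m,
                                  const unsigned int n = 256)
      {
        double integral = 0.0;
        for (unsigned int q = 0; q < n; ++q)
          {
            const double x = (q + 0.5) / n;
            integral +=
              u(x) * std::sqrt(2.0) * std::legendre(m, 2.0 * x - 1.0) / n;
          }
        return (2.0 * m + 1.0) / 2.0 * integral;
      }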

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html 2024-11-15 06:44:09.431492961 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFESubfaceValues.html 2024-11-15 06:44:09.431492961 +0000

    If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

    Parameters
    i    Number of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
    q_point    Number of the quadrature point at which function is to be evaluated

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

    Parameters
    i    Number of the shape function $\varphi_i$ to be evaluated.
    q_point    Number of the quadrature point at which function is to be evaluated.
    component    vector component to be evaluated.
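
    A minimal sketch of the distinction this makes, assuming a reinit()ed FEValues<dim> object fe_values built on a primitive vector-valued element such as FESystem<dim>(FE_Q<dim>(1), dim):

      for (unsigned int c = 0; c < dim; ++c)
        {
          // For a primitive element, at most one component c yields a
          // non-zero value, and that value equals shape_value(i, q).
          const double v =
            fe_values.shape_value_component(/*i=*/0, /*q_point=*/0, c);
        }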

    The same holds for the arguments of this function as for the shape_value() function.

    Parameters
    i    Number of the shape function $\varphi_i$ to be evaluated.
    q_point    Number of the quadrature point at which function is to be evaluated.

    Parameters
    [in] fe_function    A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] values    The values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.

    Postcondition
    values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 716 of file fe_values_base.cc.
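
    A minimal sketch of the call, assuming a Vector<double> named solution and a reinit()ed FEValues<dim> object fe_values built with a quadrature rule quadrature:

      std::vector<double> values(quadrature.size());
      fe_values.get_function_values(solution, values);
      // values[q] is the finite element field at the q-th quadrature point.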


    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 763 of file fe_values_base.cc.

    Parameters
    [in] fe_function    A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
    [out] gradients    The gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Postcondition
    gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
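
    The gradient variant looks much the same, under the same assumptions (solution, fe_values, quadrature):

      std::vector<Tensor<1, dim>> gradients(quadrature.size());
      fe_values.get_function_gradients(solution, gradients);
      // gradients[q][d] is the derivative in coordinate direction d
      // at quadrature point q.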

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

    Postcondition
    gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 898 of file fe_values_base.cc.
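A short sketch of the multi-component variant just described, under the same placeholder assumptions (fe_values constructed with update_gradients among its UpdateFlags):

// gradients[q][c][d] is the derivative in direction d of component c at
// quadrature point q; the inner vectors must be sized to fe.n_components()
// because the function assumes the output already has the correct size.
std::vector<std::vector<Tensor<1, dim>>> gradients(
  quadrature.size(), std::vector<Tensor<1, dim>>(fe.n_components()));
fe_values.reinit(cell);
fe_values.get_function_gradients(solution, gradients);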

Parameters
[in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] hessians: The Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

Postcondition
hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1001 of file fe_values_base.cc.
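The scalar version can be sketched analogously, again with placeholder names and with update_hessians passed at construction:

// hessians[q][i][j] is the (i,j)-th second derivative at quadrature point q.
std::vector<Tensor<2, dim>> hessians(quadrature.size());
fe_values.reinit(cell);
fe_values.get_function_hessians(solution, hessians);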

Parameters
[in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] laplacians: The Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.

Postcondition
laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
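The trace identity stated above can be checked directly; a sketch under the same placeholder assumptions (note that get_function_laplacians() relies on update_hessians rather than a separate flag):

std::vector<double>         laplacians(quadrature.size());
std::vector<Tensor<2, dim>> hessians(quadrature.size());
fe_values.reinit(cell);
fe_values.get_function_laplacians(solution, laplacians);
fe_values.get_function_hessians(solution, hessians);
// For each q, laplacians[q] equals trace(hessians[q]) up to roundoff.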
Parameters
[in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] third_derivatives: The third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

Postcondition
third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1234 of file fe_values_base.cc.
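A sketch of the scalar third-derivative call, under the same placeholder assumptions and with update_3rd_derivatives among the construction flags:

// third_derivatives[q][i][j][k] is the (i,j,k)-th component of the rank-3
// tensor of third derivatives at quadrature point q.
std::vector<Tensor<3, dim>> third_derivatives(quadrature.size());
fe_values.reinit(cell);
fe_values.get_function_third_derivatives(solution, third_derivatives);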

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html 2024-11-15 06:44:09.575494247 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFESystem.html 2024-11-15 06:44:09.575494247 +0000 @@ -520,11 +520,11 @@

    Detailed Description

    template<int dim, int spacedim = dim>
class FESystem< dim, spacedim >

This class provides an interface to group several elements together into one, vector-valued element. As an example, consider the Taylor-Hood element that is used for the solution of the Stokes and Navier-Stokes equations: there, the velocity (of which there are as many components as the dimension $d$ of the domain) is discretized with $Q_2$ elements and the pressure with $Q_1$ elements. Mathematically, the finite element space for the coupled problem is then often written as $V_h = Q_2^d \times Q_1$ where the exponentiation is understood to be the tensor product of spaces – i.e., in 2d, we have $V_h=Q_2\times Q_2\times Q_1$ – and tensor products lead to vectors where each component of the vector-valued function space corresponds to a scalar function in one of the $Q_2$ or $Q_1$ spaces. Using the FESystem class, this space is created using

FESystem<dim> taylor_hood_fe (FE_Q<dim>(2)^dim, // velocity components
                              FE_Q<dim>(1));    // pressure component

The creation of this element here corresponds to taking tensor-product powers of the $Q_2$ element in the first line of the list of arguments to the FESystem constructor, and then concatenation via another tensor product with the element in the second line. This kind of construction is used, for example, in the step-22 tutorial program.

    Similarly, step-8 solves an elasticity equation where we need to solve for the displacement of a solid object. The displacement again has $d$ components if the domain is $d$-dimensional, and so the combined finite element is created using

    FESystem<dim> displacement_fe (FE_Q<dim>(1)^dim);

    where now each (vector) component of the combined element corresponds to a $Q_1$ space.

    To the outside world, FESystem objects look just like a usual finite element object, they just happen to be composed of several other finite elements that are possibly of different type. These "base elements" can themselves have multiple components and, in particular, could also be vector-valued – for example, if one of the base elements is an FESystem itself (see also below). An example is given in the documentation of namespace FETools::Compositing, when using the "tensor product" strategy.
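To make the component grouping concrete, here is a sketch of how one typically addresses the pieces of the Taylor-Hood element above during assembly; the extractor indices (0 for the first velocity component, dim for the pressure) follow from the order of the constructor arguments:

FESystem<dim> taylor_hood_fe (FE_Q<dim>(2)^dim, // velocity components
                              FE_Q<dim>(1));    // pressure component

const FEValuesExtractors::Vector velocities(0);
const FEValuesExtractors::Scalar pressure(dim);
// In an assembly loop, fe_values[velocities].value(i, q) then yields a
// Tensor<1,dim>, and fe_values[pressure].value(i, q) a scalar.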

Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.

Parameters
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.

Parameters
component_mask: The mask that selects individual components of the finite element
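A sketch of this conversion for the Taylor-Hood element defined earlier, where the pressure component coincides with a complete block and the conversion therefore succeeds:

const ComponentMask pressure_components =
  taylor_hood_fe.component_mask(FEValuesExtractors::Scalar(dim));
const BlockMask pressure_block =
  taylor_hood_fe.block_mask(pressure_components);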

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
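A sketch of querying this classification, assuming the accessor name get_associated_geometry_primitive() (as in recent deal.II releases):

// For Q2 in 3d, DoFs at edge midpoints report GeometryPrimitive::line.
FE_Q<3> fe(2);
for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
  if (fe.get_associated_geometry_primitive(i) == GeometryPrimitive::line)
    std::cout << "DoF " << i << " is logically associated with an edge\n";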
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html 2024-11-15 06:44:09.667495068 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValues.html 2024-11-15 06:44:09.667495068 +0000 @@ -761,7 +761,7 @@

    If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

Parameters
i: Number of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
q_point: Number of the quadrature point at which the function is to be evaluated

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

Parameters
i: Number of the shape function $\varphi_i$ to be evaluated.
q_point: Number of the quadrature point at which the function is to be evaluated.
component: vector component to be evaluated.
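As an illustration of how shape_value() is typically used, here is a sketch of assembling a local mass matrix for a scalar element (so that shape_value(i, q) is well defined), assuming fe_values was constructed with update_values | update_JxW_values:

FullMatrix<double> cell_matrix(fe.dofs_per_cell, fe.dofs_per_cell);
for (unsigned int q = 0; q < quadrature.size(); ++q)
  for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
    for (unsigned int j = 0; j < fe.dofs_per_cell; ++j)
      cell_matrix(i, j) += fe_values.shape_value(i, q) *  // phi_i(x_q)
                           fe_values.shape_value(j, q) *  // phi_j(x_q)
                           fe_values.JxW(q);              // dx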

    The same holds for the arguments of this function as for the shape_value() function.

Parameters
i: Number of the shape function $\varphi_i$ to be evaluated.
q_point: Number of the quadrature point at which the function is to be evaluated.
Parameters
[in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] values: The values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.

Postcondition
values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 487 of file fe_values_base.cc.

This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 505 of file fe_values_base.cc.

Parameters
[in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] gradients: The gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

Postcondition
gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 683 of file fe_values_base.cc.

Parameters
[in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] hessians: The Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

Postcondition
hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 786 of file fe_values_base.cc.

Parameters
[in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] laplacians: The Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.

Postcondition
laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
Parameters
[in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] third_derivatives: The third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

Postcondition
third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1006 of file fe_values_base.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html 2024-11-15 06:44:09.759495890 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesBase.html 2024-11-15 06:44:09.759495890 +0000 @@ -650,7 +650,7 @@

    If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

Parameters
i: Number of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
q_point: Number of the quadrature point at which the function is to be evaluated

    Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

Parameters
i: Number of the shape function $\varphi_i$ to be evaluated.
q_point: Number of the quadrature point at which the function is to be evaluated.
component: vector component to be evaluated.

    The same holds for the arguments of this function as for the shape_value() function.

Parameters
i: Number of the shape function $\varphi_i$ to be evaluated.
q_point: Number of the quadrature point at which the function is to be evaluated.
Parameters
[in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] values: The values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.

Postcondition
values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 716 of file fe_values_base.cc.


    This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 763 of file fe_values_base.cc.

Parameters
[in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] gradients: The gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

Postcondition
gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 898 of file fe_values_base.cc.

Parameters
[in] fe_function: A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] hessians: The Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

Postcondition
hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
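A short sketch of the scalar case (same hypothetical dof_handler/solution setup as in the gradient example above) might look like:

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

template <int dim>
void evaluate_hessians(const DoFHandler<dim> &dof_handler,
                       const Vector<double>  &solution)
{
  QGauss<dim>   quadrature(dof_handler.get_fe().degree + 1);
  FEValues<dim> fe_values(dof_handler.get_fe(), quadrature, update_hessians);

  // The caller sizes the output vector, as required above.
  std::vector<Tensor<2, dim>> hessians(quadrature.size());

  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      fe_values.reinit(cell);
      fe_values.get_function_hessians(solution, hessians);
      // hessians[q][i][j] is the (i,j)-th second derivative at point q.
    }
}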
@@ -1230,7 +1230,7 @@ const bool quadrature_points_fastest = false

    This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1001 of file fe_values_base.cc.

    @@ -1328,11 +1328,11 @@
    Parameters
[in] fe_function A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] laplacians The Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
Postcondition
laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
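The trace relation just mentioned can be checked directly; a hedged sketch under the same hypothetical setup as the earlier examples:

#include <deal.II/base/exceptions.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/lac/vector.h>

#include <cmath>

using namespace dealii;

template <int dim>
void check_laplacian_is_trace(const DoFHandler<dim> &dof_handler,
                              const Vector<double>  &solution)
{
  QGauss<dim>   quadrature(dof_handler.get_fe().degree + 1);
  // Laplacians are computed from second derivatives, hence update_hessians.
  FEValues<dim> fe_values(dof_handler.get_fe(), quadrature, update_hessians);

  std::vector<double>         laplacians(quadrature.size());
  std::vector<Tensor<2, dim>> hessians(quadrature.size());

  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      fe_values.reinit(cell);
      fe_values.get_function_laplacians(solution, laplacians);
      fe_values.get_function_hessians(solution, hessians);

      for (unsigned int q = 0; q < quadrature.size(); ++q)
        // Equal up to roundoff: laplacians[q] == trace(hessians[q]).
        // The absolute tolerance here is an arbitrary choice for the sketch.
        Assert(std::abs(laplacians[q] - trace(hessians[q])) < 1e-10,
               ExcInternalError());
    }
}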
@@ -1363,7 +1363,7 @@ std::vector< Vector< Number > > & laplacians

    This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
    For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -1497,11 +1497,11 @@
    Parameters
[in] fe_function A vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
[out] third_derivatives The third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of the shape functions times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
Postcondition
third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
    Note
    The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
@@ -1535,7 +1535,7 @@ const bool quadrature_points_fastest = false

    This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

Postcondition
third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1234 of file fe_values_base.cc.
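Usage follows the same pattern as for the lower derivatives; a hypothetical sketch for the scalar overload shown earlier, noting the update_3rd_derivatives flag:

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

template <int dim>
void evaluate_third_derivatives(const DoFHandler<dim> &dof_handler,
                                const Vector<double>  &solution)
{
  QGauss<dim>   quadrature(dof_handler.get_fe().degree + 1);
  FEValues<dim> fe_values(dof_handler.get_fe(),
                          quadrature,
                          update_3rd_derivatives);

  std::vector<Tensor<3, dim>> third_derivatives(quadrature.size());
  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      fe_values.reinit(cell);
      fe_values.get_function_third_derivatives(solution, third_derivatives);
      // third_derivatives[q][i][j][k] is the (i,j,k)-th component of the
      // rank-3 tensor of third derivatives at quadrature point q.
    }
}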

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1RenumberedView.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1RenumberedView.html 2024-11-15 06:44:09.783496104 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1RenumberedView.html 2024-11-15 06:44:09.783496104 +0000 @@ -377,7 +377,7 @@

    Return the values of the underlying view characterized by fe_function at the renumbered quadrature points.

    This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected view.

The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    @@ -446,7 +446,7 @@

    Return the gradients of the underlying view characterized by fe_function at the renumbered quadrature points.

    This function is the equivalent of the FEValuesBase::get_function_gradients function but it only works on the selected view.

The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., value_type) times the type used to store the gradients of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 2024-11-15 06:44:09.819496426 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Scalar.html 2024-11-15 06:44:09.819496426 +0000 @@ -708,7 +708,7 @@

    Return the values of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected scalar component.

The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 361 of file fe_values_views.cc.
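A sketch of this view-based access, assuming a hypothetical Stokes-like layout with dim velocity components followed by one pressure component:

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/fe_values_extractors.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

template <int dim>
void evaluate_pressure(const DoFHandler<dim> &dof_handler,
                       const Vector<double>  &solution)
{
  // Extractor for the scalar component at index dim (the pressure here).
  const FEValuesExtractors::Scalar pressure(dim);

  QGauss<dim>   quadrature(dof_handler.get_fe().degree + 1);
  FEValues<dim> fe_values(dof_handler.get_fe(), quadrature, update_values);

  std::vector<double> pressure_values(quadrature.size());
  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      fe_values.reinit(cell);
      // Only the selected scalar component is evaluated.
      fe_values[pressure].get_function_values(solution, pressure_values);
    }
}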

    @@ -781,7 +781,7 @@

    Return the gradients of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_gradients function but it only works on the selected scalar component.

The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 412 of file fe_values_views.cc.

    @@ -840,7 +840,7 @@

    Return the Hessians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_hessians function but it only works on the selected scalar component.

The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 464 of file fe_values_views.cc.

    @@ -899,7 +899,7 @@

    Return the Laplacians of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called. The Laplacians are the trace of the Hessians.

    This function is the equivalent of the FEValuesBase::get_function_laplacians function but it only works on the selected scalar component.

The data type stored by the output vector must be what you get when you multiply the Laplacians of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 516 of file fe_values_views.cc.

    @@ -958,7 +958,7 @@

    Return the third derivatives of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_third_derivatives function but it only works on the selected scalar component.

The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 568 of file fe_values_views.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html 2024-11-15 06:44:09.843496640 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1SymmetricTensor_3_012_00_01dim_00_01spacedim_01_4.html 2024-11-15 06:44:09.843496640 +0000 @@ -497,7 +497,7 @@

    Return the values of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected vector components.

The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1049 of file fe_values_views.cc.

    @@ -571,7 +571,7 @@

    Return the divergence of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    There is no equivalent function such as FEValuesBase::get_function_divergences in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

    See the general discussion of this class for a definition of the divergence.

The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1099 of file fe_values_views.cc.
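A hedged sketch of evaluating this divergence, with a hypothetical layout in which the dim*(dim+1)/2 components of a symmetric rank-2 tensor field (e.g., a stress) start at component 0:

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/fe_values_extractors.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

template <int dim>
void evaluate_stress_divergence(const DoFHandler<dim> &dof_handler,
                                const Vector<double>  &solution)
{
  const FEValuesExtractors::SymmetricTensor<2> stress(0);

  QGauss<dim>   quadrature(dof_handler.get_fe().degree + 1);
  // Divergences are built from gradients, hence update_gradients.
  FEValues<dim> fe_values(dof_handler.get_fe(), quadrature, update_gradients);

  // The divergence_type of this view is a rank-1 tensor.
  std::vector<Tensor<1, dim>> divergences(quadrature.size());
  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      fe_values.reinit(cell);
      fe_values[stress].get_function_divergences(solution, divergences);
    }
}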

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html 2024-11-15 06:44:09.871496890 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Tensor_3_012_00_01dim_00_01spacedim_01_4.html 2024-11-15 06:44:09.871496890 +0000 @@ -603,7 +603,7 @@

    Return the values of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected vector components.

The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1153 of file fe_values_views.cc.

    @@ -677,7 +677,7 @@

    Return the divergence of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    There is no equivalent function such as FEValuesBase::get_function_divergences in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

    See the general discussion of this class for a definition of the divergence.

The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1203 of file fe_values_views.cc.

    @@ -736,7 +736,7 @@

    Return the gradient of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    See the general discussion of this class for a definition of the gradient.

The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 1256 of file fe_values_views.cc.

/usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html 2024-11-15 06:44:09.915497284 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFEValuesViews_1_1Vector.html 2024-11-15 06:44:09.915497284 +0000 @@ -820,7 +820,7 @@ const unsigned int q_point

    Return the symmetric gradient (a symmetric tensor of rank 2) of the vector component selected by this view, for the shape function and quadrature point selected by the arguments.

The symmetric gradient is defined as $\frac 12 [(\nabla \phi_i(x_q)) + (\nabla \phi_i(x_q))^T]$, where $\phi_i$ represents the dim components selected from the FEValuesBase object, and $x_q$ is the location of the $q$-th quadrature point.

    Note
    The meaning of the arguments is as documented for the value() function.
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
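This per-shape-function access is what one typically uses during assembly; a hedged sketch of an elasticity-style cell term, assuming fe_values was constructed with update_gradients | update_JxW_values and has been reinit()ed on the current cell, and that the first dim components form a hypothetical displacement field:

#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/fe_values_extractors.h>
#include <deal.II/lac/full_matrix.h>

using namespace dealii;

template <int dim>
void add_symmetric_gradient_terms(const FEValues<dim> &fe_values,
                                  FullMatrix<double>  &cell_matrix)
{
  const FEValuesExtractors::Vector displacements(0);

  for (unsigned int q = 0; q < fe_values.n_quadrature_points; ++q)
    for (unsigned int i = 0; i < fe_values.dofs_per_cell; ++i)
      for (unsigned int j = 0; j < fe_values.dofs_per_cell; ++j)
        // (eps(phi_i), eps(phi_j)) contribution at quadrature point q.
        cell_matrix(i, j) +=
          scalar_product(
            fe_values[displacements].symmetric_gradient(i, q),
            fe_values[displacements].symmetric_gradient(j, q)) *
          fe_values.JxW(q);
}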
    @@ -958,7 +958,7 @@

    Return the values of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_values function but it only works on the selected vector components.

The data type stored by the output vector must be what you get when you multiply the values of shape functions (i.e., value_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 622 of file fe_values_views.cc.

    @@ -1031,7 +1031,7 @@

    Return the gradients of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_gradients function but it only works on the selected vector components.

The data type stored by the output vector must be what you get when you multiply the gradients of shape functions (i.e., gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 672 of file fe_values_views.cc.

    @@ -1092,7 +1092,7 @@

The symmetric gradient of a vector field $\mathbf v$ is defined as $\varepsilon(\mathbf v)=\frac 12 (\nabla \mathbf v + \nabla \mathbf v^T)$.

    Note
    There is no equivalent function such as FEValuesBase::get_function_symmetric_gradients in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.
The data type stored by the output vector must be what you get when you multiply the symmetric gradients of shape functions (i.e., symmetric_gradient_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 724 of file fe_values_views.cc.

    @@ -1151,7 +1151,7 @@

    Return the divergence of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    There is no equivalent function such as FEValuesBase::get_function_divergences in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

The data type stored by the output vector must be what you get when you multiply the divergences of shape functions (i.e., divergence_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 778 of file fe_values_views.cc.

    @@ -1210,7 +1210,7 @@

    Return the curl of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    There is no equivalent function such as FEValuesBase::get_function_curls in the FEValues classes but the information can be obtained from FEValuesBase::get_function_gradients, of course.

The data type stored by the output vector must be what you get when you multiply the curls of shape functions (i.e., curl_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 831 of file fe_values_views.cc.
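A hedged sketch of evaluating these curls (hypothetical layout: the vector field occupies the first dim components):

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/fe_values_extractors.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

template <int dim>
void evaluate_curls(const DoFHandler<dim> &dof_handler,
                    const Vector<double>  &solution)
{
  const FEValuesExtractors::Vector field(0);

  QGauss<dim>   quadrature(dof_handler.get_fe().degree + 1);
  // Curls are built from gradients, hence update_gradients.
  FEValues<dim> fe_values(dof_handler.get_fe(), quadrature, update_gradients);

  // curl_type is Tensor<1,1> in 2d (effectively a scalar) and
  // Tensor<1,3> in 3d.
  std::vector<typename FEValuesViews::Vector<dim>::curl_type> curls(
    quadrature.size());

  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      fe_values.reinit(cell);
      fe_values[field].get_function_curls(solution, curls);
    }
}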

    @@ -1269,7 +1269,7 @@

    Return the Hessians of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_hessians function but it only works on the selected vector components.

The data type stored by the output vector must be what you get when you multiply the Hessians of shape functions (i.e., hessian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 883 of file fe_values_views.cc.

    @@ -1328,7 +1328,7 @@

    Return the Laplacians of the selected vector components of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called. The Laplacians are the trace of the Hessians.

    This function is the equivalent of the FEValuesBase::get_function_laplacians function but it only works on the selected vector components.

The data type stored by the output vector must be what you get when you multiply the Laplacians of shape functions (i.e., laplacian_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 935 of file fe_values_views.cc.

    @@ -1387,7 +1387,7 @@

    Return the third derivatives of the selected scalar component of the finite element function characterized by fe_function at the quadrature points of the cell, face or subface selected the last time the reinit function of the FEValues object was called.

    This function is the equivalent of the FEValuesBase::get_function_third_derivatives function but it only works on the selected scalar component.

The data type stored by the output vector must be what you get when you multiply the third derivatives of shape functions (i.e., third_derivative_type) times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).

    Note
    For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_third_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

    Definition at line 995 of file fe_values_views.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html 2024-11-15 06:44:10.043498427 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__ABF.html 2024-11-15 06:44:10.043498427 +0000 @@ -764,11 +764,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.
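A hedged sketch of the workflow this paragraph describes, assuming the function f is given on the reference cell (as in FETools::compute_node_matrix()):

#include <deal.II/base/function.h>
#include <deal.II/fe/fe.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

template <int dim>
std::vector<double> node_values(const FiniteElement<dim> &fe,
                                const Function<dim>      &f)
{
  const std::vector<Point<dim>> &points = fe.get_generalized_support_points();

  // phi(x_i): one value per vector component at every generalized
  // support point.
  std::vector<Vector<double>> support_point_values(
    points.size(), Vector<double>(fe.n_components()));
  for (unsigned int i = 0; i < points.size(); ++i)
    f.vector_value(points[i], support_point_values[i]);

  // Apply the node functionals Psi_i; the result has dofs_per_cell entries.
  std::vector<double> dof_values(fe.dofs_per_cell);
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, dof_values);
  return dof_values;
}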

    Parameters
    @@ -3374,7 +3374,7 @@
[in] support_point_values An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3476,7 +3476,7 @@
scalar An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3766,8 +3766,8 @@
component_mask The mask that selects individual components of the finite element
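A sketch of this component-mask-to-block-mask conversion, using the Raviart-Thomas situation mentioned above (the concrete element choice is hypothetical):

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/component_mask.h>
#include <deal.II/fe/fe_dgq.h>
#include <deal.II/fe/fe_raviart_thomas.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

using namespace dealii;

template <int dim>
void mask_conversion_example()
{
  // One Raviart-Thomas block (dim coupled components) plus a scalar block.
  const FESystem<dim> fe(FE_RaviartThomas<dim>(1), 1, FE_DGQ<dim>(1), 1);

  // Selecting all dim vector components covers the whole first block,
  // so the conversion succeeds:
  const ComponentMask velocity_components =
    fe.component_mask(FEValuesExtractors::Vector(0));
  const BlockMask velocity_block = fe.block_mask(velocity_components);
  (void)velocity_block;

  // Selecting only the x component covers part of a block; as documented
  // above, this conversion would throw an exception:
  // fe.block_mask(fe.component_mask(FEValuesExtractors::Scalar(0)));
}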

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial whose support point physically lies on a line bounding a cell is nonetheless nonzero only on that one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html 2024-11-15 06:44:10.175499606 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BDM.html 2024-11-15 06:44:10.175499606 +0000 @@ -740,11 +740,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.
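As a usage sketch (the sampled function below is our own invention, and the element is just one example of an element with generalized support points), this is how the interface described above is typically driven:

```cpp
#include <deal.II/base/point.h>
#include <deal.II/fe/fe_raviart_thomas.h>
#include <deal.II/lac/vector.h>

#include <vector>

int main()
{
  const dealii::FE_RaviartThomas<2> fe(1);

  // One function value per generalized support point; each value has as
  // many entries as the element has vector components.
  const std::vector<dealii::Point<2>> &points =
    fe.get_generalized_support_points();
  std::vector<dealii::Vector<double>> support_point_values(
    points.size(), dealii::Vector<double>(fe.n_components()));

  // Sample a made-up function f(x,y) = (y, -x) at the support points.
  for (unsigned int q = 0; q < points.size(); ++q)
    {
      support_point_values[q][0] = points[q][1];
      support_point_values[q][1] = -points[q][0];
    }

  // Apply the node functionals Psi_i: the result is the vector of
  // expansion coefficients of the interpolant f_h.
  std::vector<double> dof_values(fe.n_dofs_per_cell());
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, dof_values);
}
```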

    Parameters
    @@ -3312,7 +3312,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly one entry is true, namely the one that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3414,7 +3414,7 @@
scalar: An object that represents a single scalar vector component of this finite element.
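A short sketch of the successful case (the FESystem below is our own choice of example): in a system built from a Raviart-Thomas velocity and a DGQ pressure, the pressure component coincides with a complete block, so a scalar extractor for it yields a valid block mask:

```cpp
#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/fe_dgq.h>
#include <deal.II/fe/fe_raviart_thomas.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

int main()
{
  // Velocity (2 components, one block) + pressure (1 component, one block).
  const dealii::FESystem<2> fe(dealii::FE_RaviartThomas<2>(1), 1,
                               dealii::FE_DGQ<2>(1), 1);

  // Component 2 (the pressure) fills a complete block, so this succeeds;
  // an extractor for the x-velocity alone would trigger the exception
  // described in the note above.
  const dealii::FEValuesExtractors::Scalar pressure(2);
  const dealii::BlockMask mask = fe.block_mask(pressure);
}
```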

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3704,8 +3704,8 @@
component_mask: The mask that selects individual components of the finite element
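The component-mask overload works the same way; a self-contained sketch under the same assumptions as above (a hypothetical RT-DGQ system of our own choosing):

```cpp
#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/component_mask.h>
#include <deal.II/fe/fe_dgq.h>
#include <deal.II/fe/fe_raviart_thomas.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

int main()
{
  const dealii::FESystem<2> fe(dealii::FE_RaviartThomas<2>(1), 1,
                               dealii::FE_DGQ<2>(1), 1);

  // Select only the pressure component (component 2), then convert the
  // component mask into the corresponding block mask. This succeeds
  // because the selected component fills a complete block.
  const dealii::ComponentMask pressure_components =
    fe.component_mask(dealii::FEValuesExtractors::Scalar(2));
  const dealii::BlockMask pressure_block = fe.block_mask(pressure_components);
}
```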

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html 2024-11-15 06:44:10.311500820 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__BernardiRaugel.html 2024-11-15 06:44:10.311500820 +0000 @@ -740,11 +740,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -3282,7 +3282,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly one entry is true, namely the one that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3384,7 +3384,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3674,8 +3674,8 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html 2024-11-15 06:44:10.443501999 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Bernstein.html 2024-11-15 06:44:10.443501999 +0000 @@ -2384,17 +2384,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
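Read as code, the correction is a single contraction over $m$. The following is a hypothetical sketch of that formula (names and types are ours, not deal.II's internal implementation), for one shape function:

```cpp
#include <array>

template <int dim>
using Tensor2 = std::array<std::array<double, dim>, dim>;
template <int dim>
using Tensor3 = std::array<Tensor2<dim>, dim>;

// Subtract H_{mjk} * d phi_i / d x_m from the uncorrected Hessian D_{ijk}.
// D[j][k]    : uncorrected Hessian D_{ijk} of one shape function
// H[m][j][k] : Jacobian pushed-forward gradient H_{mjk}
// grad[m]    : corrected gradient d phi_i / d x_m
template <int dim>
Tensor2<dim> correct_hessians(const Tensor2<dim>            &D,
                              const Tensor3<dim>            &H,
                              const std::array<double, dim> &grad)
{
  Tensor2<dim> hess = D;
  for (int j = 0; j < dim; ++j)
    for (int k = 0; k < dim; ++k)
      for (int m = 0; m < dim; ++m)
        hess[j][k] -= H[m][j][k] * grad[m];
  return hess;
}
```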

    @@ -2427,21 +2427,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
(J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
- H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
- H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.
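The third-derivative correction contracts over $m$ in the same way, now with three H-terms and one K-term. Again a hypothetical sketch under the same naming assumptions as above:

```cpp
#include <array>

template <int dim>
using Tensor2 = std::array<std::array<double, dim>, dim>;
template <int dim>
using Tensor3 = std::array<Tensor2<dim>, dim>;
template <int dim>
using Tensor4 = std::array<Tensor3<dim>, dim>;

// D[j][k][l]    : uncorrected third derivative D_{ijkl} of one shape function
// H[m][j][k]    : Jacobian pushed-forward gradient H_{mjk}
// K[m][j][k][l] : Jacobian pushed-forward second derivative K_{mjkl}
// hess[j][k]    : corrected second derivatives d^2 phi_i / d x_j d x_k
// grad[m]       : corrected gradient d phi_i / d x_m
template <int dim>
Tensor3<dim> correct_third_derivatives(const Tensor3<dim>            &D,
                                       const Tensor3<dim>            &H,
                                       const Tensor4<dim>            &K,
                                       const Tensor2<dim>            &hess,
                                       const std::array<double, dim> &grad)
{
  Tensor3<dim> third = D;
  for (int j = 0; j < dim; ++j)
    for (int k = 0; k < dim; ++k)
      for (int l = 0; l < dim; ++l)
        for (int m = 0; m < dim; ++m)
          third[j][k][l] -= H[m][j][l] * hess[k][m]
                            + H[m][k][l] * hess[j][m]
                            + H[m][j][k] * hess[l][m]
                            + K[m][j][k][l] * grad[m];
  return third;
}
```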

    @@ -3518,7 +3518,7 @@

Return a block mask with as many elements as this object has blocks, in which exactly one entry is true, namely the one that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3620,7 +3620,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3881,8 +3881,8 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3916,11 +3916,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html 2024-11-15 06:44:10.575503178 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGBDM.html 2024-11-15 06:44:10.575503178 +0000 @@ -3156,7 +3156,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly one entry is true, namely the one that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3258,7 +3258,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3548,8 +3548,8 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3583,11 +3583,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html 2024-11-15 06:44:10.707504357 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGNedelec.html 2024-11-15 06:44:10.711504393 +0000 @@ -3156,7 +3156,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly one entry is true, namely the one that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3258,7 +3258,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3548,8 +3548,8 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.
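
For a scalar Lagrange element, where the node functionals are point evaluations, the whole operation can be sketched as follows (a minimal sketch against the deal.II 9.x interface; the choice of $f(x,y)=xy$ is arbitrary):

#include <deal.II/base/point.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/lac/vector.h>

#include <iostream>
#include <vector>

using namespace dealii;

int main()
{
  const FE_Q<2> fe(2); // scalar biquadratic element, 9 dofs per cell

  // Evaluate f(x,y) = x*y at the generalized support points of the
  // reference cell (for FE_Q these coincide with the support points)...
  const std::vector<Point<2>> &points = fe.get_generalized_support_points();
  std::vector<Vector<double>>  support_point_values(
    points.size(), Vector<double>(fe.n_components()));
  for (unsigned int i = 0; i < points.size(); ++i)
    support_point_values[i](0) = points[i][0] * points[i][1];

  // ...and compute the nodal values Psi_i[f], i.e., the expansion
  // coefficients of the interpolant f_h = sum_i Psi_i[f] phi_i.
  std::vector<double> dof_values(fe.n_dofs_per_cell());
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, dof_values);

  for (const double v : dof_values)
    std::cout << v << '\n'; // for a Lagrange element: f at each support point
}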

Parameters
[in] support_point_values : An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html 2024-11-15 06:44:10.839505536 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGP.html 2024-11-15 06:44:10.843505572 +0000 @@ -491,24 +491,24 @@

    Detailed Description

    template<int dim, int spacedim = dim>
    class FE_DGP< dim, spacedim >

    Discontinuous finite elements based on Legendre polynomials.

    This finite element implements complete polynomial spaces, that is, dim-dimensional polynomials of degree p. For example, in 2d the element FE_DGP(1) would represent the span of the functions $\{1,\hat x,\hat y\}$, which is in contrast to the element FE_DGQ(1) that is formed by the span of $\{1,\hat x,\hat y,\hat x\hat y\}$. Since the DGP space has only three unknowns for each quadrilateral, it is immediately clear that this element can not be continuous.


    The basis functions used in this element for the space described above are chosen to form a Legendre basis on the unit square, i.e., in particular they are $L_2$-orthogonal and normalized on the reference cell (but not necessarily on the real cell). As a consequence, the first basis function of this element is always the function that is constant and equal to one, regardless of the polynomial degree of the element. In addition, as a result of the orthogonality of the basis functions, the mass matrix is diagonal if the grid cells are parallelograms. Note that this is in contrast to the FE_DGPMonomial class that actually uses the monomial basis listed above as basis functions, without transformation from reference to real cell.

    The shape functions are defined in the class PolynomialSpace. The polynomials used inside PolynomialSpace are Polynomials::Legendre up to degree p given in FE_DGP. For the ordering of the basis functions, refer to PolynomialSpace, remembering that the Legendre polynomials are ordered by ascending degree.
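
The two properties above, i.e., the dimension of the polynomial space and the constant first basis function, can be checked with a small sketch like the following (the values noted in the comments are what one would expect on the reference cell):

#include <deal.II/base/point.h>
#include <deal.II/fe/fe_dgp.h>
#include <deal.II/fe/fe_dgq.h>

#include <iostream>

using namespace dealii;

int main()
{
  const FE_DGP<2> dgp(1); // span {1, x, y}     -> 3 dofs per cell
  const FE_DGQ<2> dgq(1); // span {1, x, y, xy} -> 4 dofs per cell
  std::cout << dgp.n_dofs_per_cell() << ' '
            << dgq.n_dofs_per_cell() << '\n'; // prints: 3 4

  // The first FE_DGP basis function is the constant Legendre mode, so
  // (with the normalization on the unit square) it is 1 everywhere:
  for (const double x : {0.0, 0.3, 0.9})
    std::cout << dgp.shape_value(0, Point<2>(x, 1. - x)) << ' ';
}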

    Note
    This element is not defined by finding shape functions within the given function space that interpolate a particular set of points. Consequently, there are no support points to which a given function could be interpolated; finding a finite element function that approximates a given function is therefore only possible through projection, rather than interpolation. Secondly, the shape functions of this element do not jointly add up to one. As a consequence of this, adding or subtracting a constant value – such as one would do to make a function have mean value zero – can not be done by simply subtracting the constant value from each degree of freedom. Rather, one needs to use the fact that the first basis function is constant equal to one and simply subtract the constant from the value of the degree of freedom corresponding to this first shape function on each cell.
    This class is only partially implemented for the codimension one case (spacedim != dim ), since no passage of information between meshes of different refinement level is possible because the embedding and projection matrices are not computed in the class constructor.
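
The first of the two notes above can be turned into code: because the first FE_DGP shape function on every cell is the constant one, subtracting a constant $c$ from the finite element function amounts to shifting the first degree of freedom of each cell. A minimal sketch, assuming a scalar FE_DGP field stored in a deal.II Vector:

#include <deal.II/base/types.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/vector.h>

#include <vector>

using namespace dealii;

// Subtract the constant c from a scalar FE_DGP field. This relies on the
// first shape function being the constant one on every cell; for Lagrange
// elements one would instead subtract c from every degree of freedom.
template <int dim>
void subtract_constant(const DoFHandler<dim> &dof_handler,
                       Vector<double>        &solution,
                       const double           c)
{
  std::vector<types::global_dof_index> dof_indices(
    dof_handler.get_fe().n_dofs_per_cell());
  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      cell->get_dof_indices(dof_indices);
      solution(dof_indices[0]) -= c; // dof 0 multiplies the constant mode
    }
}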

    Transformation properties

    It is worth noting that under a (bi-, tri-)linear mapping, the space described by this element does not contain $P(k)$, even if we use a basis of polynomials of degree $k$. Consequently, for example, on meshes with non-affine cells, a linear function can not be exactly represented by elements of type FE_DGP(1) or FE_DGPMonomial(1).


    This can be understood by the following 2-d example: consider the cell with vertices at $(0,0),(1,0),(0,1),(s,s)$:

For this cell, a bilinear transformation $F$ produces the relations $x=\hat x+\hat x\hat y$ and $y=\hat y+\hat x\hat y$ that correlate reference coordinates $\hat x,\hat y$ and coordinates in real space $x,y$. Under this mapping, the constant function is clearly mapped onto itself, but the two other shape functions of the $P_1$ space, namely $\phi_1(\hat x,\hat y)=\hat x$ and $\phi_2(\hat x,\hat y)=\hat y$ are mapped onto $\phi_1(x,y)=\frac{x-t}{t(s-1)},\phi_2(x,y)=t$ where $t=\frac{y}{s-x+sx+y-sy}$.


    For the simple case that $s=1$, i.e. if the real cell is the unit square, the expressions can be simplified to $t=y$ and $\phi_1(x,y)=x,\phi_2(x,y)=y$. However, for all other cases, the functions $\phi_1(x,y),\phi_2(x,y)$ are not linear any more, and neither is any linear combination of them. Consequently, the linear functions are not within the range of the mapped $P_1$ polynomials.

    Visualization of shape functions

    In 2d, the shape functions of this element look as follows.

[Plots of the shape functions of the $P_0$ through $P_4$ elements follow here; only the embedded image files changed in this diff, while the captions ("$P_k$ element, shape function $i$") are identical between old and new.]

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html 2024-11-15 06:44:10.983506822 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPMonomial.html 2024-11-15 06:44:10.983506822 +0000 @@ -504,21 +504,21 @@

    Detailed Description

    template<int dim>
    class FE_DGPMonomial< dim >

    Discontinuous finite elements based on monomials.

    This finite element implements complete polynomial spaces, that is, dim-dimensional polynomials of degree p. For example, in 2d the element FE_DGP(1) would represent the span of the functions $\{1,\hat x,\hat y\}$, which is in contrast to the element FE_DGQ(1) that is formed by the span of $\{1,\hat x,\hat y,\hat x\hat y\}$. Since the DGP space has only three unknowns for each quadrilateral, it is immediately clear that this element can not be continuous.

    The basis functions for this element are chosen to be the monomials listed above. Note that this is the main difference to the FE_DGP class that uses a set of polynomials of complete degree p that form a Legendre basis on the unit square. Thus, there, the mass matrix is diagonal, if the grid cells are parallelograms. The basis here does not have this property; however, it is simpler to compute. On the other hand, this element has the additional disadvantage that the local cell matrices usually have a worse condition number than the ones originating from the FE_DGP element.

    This class is not implemented for the codimension one case (spacedim != dim).

    Transformation properties

    It is worth noting that under a (bi-, tri-)linear mapping, the space described by this element does not contain $P(k)$, even if we use a basis of polynomials of degree $k$. Consequently, for example, on meshes with non-affine cells, a linear function can not be exactly represented by elements of type FE_DGP(1) or FE_DGPMonomial(1).


    This can be understood by the following 2-d example: consider the cell with vertices at $(0,0),(1,0),(0,1),(s,s)$:

For this cell, a bilinear transformation $F$ produces the relations $x=\hat x+\hat x\hat y$ and $y=\hat y+\hat x\hat y$ that correlate reference coordinates $\hat x,\hat y$ and coordinates in real space $x,y$. Under this mapping, the constant function is clearly mapped onto itself, but the two other shape functions of the $P_1$ space, namely $\phi_1(\hat x,\hat y)=\hat x$ and $\phi_2(\hat x,\hat y)=\hat y$ are mapped onto $\phi_1(x,y)=\frac{x-t}{t(s-1)},\phi_2(x,y)=t$ where $t=\frac{y}{s-x+sx+y-sy}$.


    For the simple case that $s=1$, i.e. if the real cell is the unit square, the expressions can be simplified to $t=y$ and $\phi_1(x,y)=x,\phi_2(x,y)=y$. However, for all other cases, the functions $\phi_1(x,y),\phi_2(x,y)$ are not linear any more, and neither is any linear combination of them. Consequently, the linear functions are not within the range of the mapped $P_1$ polynomials.

    Visualization of shape functions

    In 2d, the shape functions of this element look as follows.

[Plots of the shape functions of the $P_0$ through $P_4$ elements follow here; only the embedded image files changed in this diff, the captions are identical between old and new.]

/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html 2024-11-15 06:44:11.111507965 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGPNonparametric.html 2024-11-15 06:44:11.115508001 +0000 @@ -499,7 +499,7 @@

    Besides, this class is not implemented for the codimension one case (spacedim != dim).

    Visualization of shape functions

    In 2d, the shape functions of this element look as follows.

[Plots of the shape functions of the $P_0$ through $P_4$ elements follow here; only the embedded image files changed in this diff, the captions are identical between old and new.]

/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html 2024-11-15 06:44:11.243509145 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQ.html 2024-11-15 06:44:11.243509145 +0000 @@ -530,7 +530,7 @@

[Code listing of the node numbering on the reference cell omitted; it shows node 13 placed in the interior of the hex.]

    Note, however, that these are just the Lagrange interpolation points of the shape functions. Even though they may physically be on the boundary of the cell, they are logically in the interior since there are no continuity requirements for these shape functions across cell boundaries. While discontinuous, when restricted to a single cell the shape functions of this element are exactly the same as those of the FE_Q element where they are shown visually.

    Unit support point distribution and conditioning of interpolation

When constructing an FE_DGQ element at polynomial degrees one or two, equidistant support points at 0 and 1 (linear case) or 0, 0.5, and 1 (quadratic case) are used. The unit support or nodal points $x_i$ are those points where the $j$th Lagrange polynomial satisfies the $\delta_{ij}$ property, i.e., where one polynomial is one and all the others are zero. For higher polynomial degrees, the support points are non-equidistant by default, and chosen to be the support points of the (degree+1)-order Gauss-Lobatto quadrature rule. This point distribution yields well-conditioned Lagrange interpolation at arbitrary polynomial degrees. By contrast, polynomials based on equidistant points become increasingly ill-conditioned as the polynomial degree increases. In interpolation, this effect is known as the Runge phenomenon. For Galerkin methods, the Runge phenomenon is typically not visible in the solution quality but rather in the condition number of the associated system matrices. For example, the elemental mass matrix of equidistant points at degree 10 has condition number 2.6e6, whereas the condition number for Gauss-Lobatto points is around 400.

The Gauss-Lobatto points in 1d include the end points 0 and 1 of the unit interval. The interior points are shifted towards the end points, which gives a denser point distribution close to the element boundary.
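
The resulting point distribution is easy to inspect; a sketch (the values in the comment are the degree-4 Gauss-Lobatto points on the unit interval, up to rounding):

#include <deal.II/base/point.h>
#include <deal.II/fe/fe_dgq.h>

#include <iostream>

using namespace dealii;

int main()
{
  const FE_DGQ<1> fe(4); // degree 4: five 1d support points
  for (const Point<1> &p : fe.get_unit_support_points())
    std::cout << p[0] << '\n';
  // approximately: 0, 0.1727, 0.5, 0.8273, 1
  // -- denser near the interval ends than equidistant points
}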

    Definition at line 111 of file fe_dgq.h.

    @@ -2294,17 +2294,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
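
To see where the correction term comes from, write $\phi_i(\mathbf x) = \hat\phi_i(\hat{\mathbf x}(\mathbf x))$ and differentiate the mapped gradient once more; by the product rule,

\[
 \frac{d^2 \phi_i}{d x_j d x_k}
 = \frac{d^2 \hat\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1}
 + \frac{d \hat\phi_i}{d \hat x_J} \frac{d (J_{jJ})^{-1}}{d x_k}
 = D_{ijk} + \frac{d \hat\phi_i}{d \hat x_J} \frac{d (J_{jJ})^{-1}}{d x_k},
\]

and the second term, rewritten in terms of the pushed-forward Jacobian derivative, is exactly the $-H_{mjk} \frac{d \phi_i}{d x_m}$ correction above.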

    @@ -2337,21 +2337,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3481,7 +3481,7 @@
    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
Parameters
scalar : An object that represents a single scalar vector component of this finite element.

@@ -3583,7 +3583,7 @@

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
Parameters
component_mask : The mask that selects individual components of the finite element

@@ -3844,8 +3844,8 @@
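
The conversion can be sketched with a Stokes-like element; the nested FESystem below makes the two velocity components a single block, so selecting the pressure succeeds while selecting only the $x$ velocity would trigger the exception described in the notes above (a minimal sketch, not taken from this documentation):

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/component_mask.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

#include <iostream>

using namespace dealii;

int main()
{
  // Two blocks: [velocity (2 components), pressure (1 component)].
  const FESystem<2> fe(FESystem<2>(FE_Q<2>(2), 2), 1, FE_Q<2>(1), 1);

  // The pressure is component 2 and coincides with block 1, so the
  // conversion from a component mask to a block mask is well defined:
  const ComponentMask pressure_components =
    fe.component_mask(FEValuesExtractors::Scalar(2));
  const BlockMask pressure_blocks = fe.block_mask(pressure_components);

  std::cout << fe.n_blocks() << ' '                         // 2
            << pressure_blocks.n_selected_blocks() << '\n'; // 1

  // fe.block_mask(fe.component_mask(FEValuesExtractors::Scalar(0)))
  // would fail: the x velocity is only part of the velocity block.
}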

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of the corresponding shape function of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but the corresponding shape function is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html 2024-11-15 06:44:11.371510288 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQArbitraryNodes.html 2024-11-15 06:44:11.371510288 +0000 @@ -2188,17 +2188,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2231,21 +2231,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3375,7 +3375,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
Parameters
scalar : An object that represents a single scalar vector component of this finite element.

@@ -3477,7 +3477,7 @@

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
Parameters
component_mask : The mask that selects individual components of the finite element

@@ -3738,8 +3738,8 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of the corresponding shape function of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but the corresponding shape function is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html 2024-11-15 06:44:11.499511430 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQHermite.html 2024-11-15 06:44:11.499511430 +0000 @@ -2190,17 +2190,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2233,21 +2233,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3377,7 +3377,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
Parameters
scalar : An object that represents a single scalar vector component of this finite element.

@@ -3479,7 +3479,7 @@

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
Parameters
component_mask : The mask that selects individual components of the finite element

@@ -3740,8 +3740,8 @@

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of the corresponding shape function of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but the corresponding shape function is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html 2024-11-15 06:44:11.623512539 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGQLegendre.html 2024-11-15 06:44:11.623512539 +0000 @@ -2190,17 +2190,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2233,21 +2233,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3377,7 +3377,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
Parameters
scalar : An object that represents a single scalar vector component of this finite element.

@@ -3479,7 +3479,7 @@

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
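
    Continuing the sketch above (same assumed fe object), the conversion from ComponentMask to BlockMask might be used like this:

// Select all velocity components at once, then convert the resulting
// ComponentMask to a BlockMask; the velocity components together form
// a complete block, so this succeeds.
const FEValuesExtractors::Vector velocities(0);
const ComponentMask velocity_components = fe.component_mask(velocities);
const BlockMask     velocity_blocks     = fe.block_mask(velocity_components);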
    Parameters
    @@ -3740,8 +3740,8 @@
    component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial may have its support point physically located on a line bounding a cell, yet be nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
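
    A small, hedged illustration of this classification, assuming the usual deal.II headers and the FiniteElement::get_associated_geometry_primitive() interface:

#include <deal.II/fe/fe_q.h>
#include <iostream>

using namespace dealii;

int main()
{
  // Count how many DoFs of a Q2 element in 3d are logically associated
  // with objects of each dimension (0 = vertex, 1 = line, 2 = quad, 3 = hex);
  // for Q2 in 3d one expects 8 / 12 / 6 / 1.
  const FE_Q<3> fe(2);
  unsigned int count[4] = {0, 0, 0, 0};
  for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
    ++count[fe.get_associated_geometry_primitive(i).get_dimension()];
  for (unsigned int d = 0; d < 4; ++d)
    std::cout << "dimension " << d << ": " << count[d] << " DoFs\n";
}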

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html 2024-11-15 06:44:11.755513717 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGRaviartThomas.html 2024-11-15 06:44:11.755513717 +0000 @@ -3156,7 +3156,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3258,7 +3258,7 @@
    scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3548,8 +3548,8 @@
    component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial may have its support point physically located on a line bounding a cell, yet be nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3583,11 +3583,11 @@
    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes the nodal values of the element, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x) $ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then it returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.
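
    A hedged usage sketch, assuming the usual deal.II types, an element object fe, a dimension dim, and a dealii::Function<dim> called my_function (fe and my_function are placeholder names):

// Evaluate the function at the generalized support points of the
// reference cell, then apply the node functionals to obtain the
// DoF values of the interpolant.
std::vector<Vector<double>> support_point_values;
for (const Point<dim> &p : fe.get_generalized_support_points())
  {
    Vector<double> value(fe.n_components());
    my_function.vector_value(p, value); // one vector of values per point
    support_point_values.push_back(value);
  }

std::vector<double> dof_values(fe.n_dofs_per_cell());
fe.convert_generalized_support_point_values_to_dof_values(support_point_values,
                                                          dof_values);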

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html 2024-11-15 06:44:11.887514896 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__DGVector.html 2024-11-15 06:44:11.887514896 +0000 @@ -3173,7 +3173,7 @@
    [in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3275,7 +3275,7 @@
    scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3565,8 +3565,8 @@
    component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial may have its support point physically located on a line bounding a cell, yet be nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3600,11 +3600,11 @@
    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes the nodal values of the element, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x) $ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then it returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html 2024-11-15 06:44:12.023516111 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Enriched.html 2024-11-15 06:44:12.027516147 +0000 @@ -500,12 +500,12 @@

    Detailed Description

    template<int dim, int spacedim = dim>
    class FE_Enriched< dim, spacedim >

    Implementation of a partition of unity finite element method (PUM) by Babuska and Melenk which enriches a standard finite element with an enrichment function multiplied with another (usually linear) finite element:

\[
U(\mathbf x) = \sum_i N_i(\mathbf x) U_i + \sum_j N_j(\mathbf x) \sum_k F_k(\mathbf x) U_{jk}
\]

    where $ N_i(\mathbf x) $ and $ N_j(\mathbf x) $ are the underlying finite elements (including the mapping from the isoparametric element to the real element); $ F_k(\mathbf x) $ are the scalar enrichment functions in real space (e.g. $ 1/r $, $ \exp(-r) $, etc.); and $ U_i $ and $ U_{jk} $ are the standard and enriched DoFs. This makes it possible to include in the finite element space a priori knowledge about the partial differential equation being solved, which in turn improves the local approximation properties of the spaces. This can be useful for highly oscillatory solutions, problems with domain corners, problems on unbounded domains, or sudden changes of boundary conditions. The PUM uses finite element spaces which satisfy the partition of unity property (e.g. FE_Q). Among other properties, this makes the resulting space reproduce the enrichment functions exactly.

    The simplest constructor of this class takes two finite element objects and an enrichment function to be used. For example
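
    For example (a sketch of such a constructor call; 'enrichment' stands in for a user-provided Function<dim> object):

FE_Enriched<dim> fe(FE_Q<dim>(2),
                    FE_Q<dim>(1),
                    &enrichment); // 'enrichment': an assumed Function<dim>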

    @@ -513,7 +513,7 @@

    In this case, standard DoFs are distributed by FE_Q<dim>(2), whereas enriched DoFs come from a single finite element FE_Q<dim>(1) used with a single enrichment function. The total number of DoFs on the enriched element is then the sum of the DoFs of FE_Q<dim>(2) and FE_Q<dim>(1).

    As an example of an enrichment function, consider $ \exp(-x) $, which leads to the following shape functions on the unit element:

    @@ -526,7 +526,7 @@
    1d element, base and enriched shape functions. Enriched shape function corresponding to the central vertex.

    Note that evaluation of gradients (hessians) of the enriched shape functions or the finite element field requires evaluation of gradients (gradients and hessians) of the enrichment functions:

\begin{align*}
  U(\mathbf x)
    &= \sum_i N_i(\mathbf x) U_i
     + \sum_{j,k} N_j(\mathbf x) F_k(\mathbf x) U_{jk} \\
@@ -540,10 +540,10 @@
 F_k(\mathbf x) + \mathbf \nabla F_k(\mathbf x) \mathbf \nabla N_j(\mathbf x)
 + \mathbf \nabla N_j(\mathbf x) \mathbf \nabla F_k(\mathbf x)
 + N_j(\mathbf x) \mathbf \nabla \mathbf \nabla F_k(\mathbf x) \right] U_{jk}
\end{align*}
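
    Because these chain-rule terms involve $\mathbf \nabla F_k$ and $\mathbf \nabla \mathbf \nabla F_k$, a user-supplied enrichment function has to override the gradient (and, if shape Hessians are needed, hessian) members of Function. A minimal sketch of such a function, using the $ \exp(-x) $ example from above (this class is illustrative, not part of the library):

#include <deal.II/base/function.h>
#include <cmath>

using namespace dealii;

// Enrichment function exp(-x) together with its gradient; a real
// application would also override hessian() if Hessians are required.
template <int dim>
class ExpEnrichment : public Function<dim>
{
public:
  virtual double value(const Point<dim> &p,
                       const unsigned int /*component*/ = 0) const override
  {
    return std::exp(-p[0]);
  }

  virtual Tensor<1, dim> gradient(const Point<dim> &p,
                                  const unsigned int /*component*/ = 0) const override
  {
    Tensor<1, dim> grad; // deal.II value-initializes tensors to zero
    grad[0] = -std::exp(-p[0]);
    return grad;
  }
};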

    Using enriched and non-enriched FEs together

    In most applications it is beneficial to introduce enrichments only in some part of the domain (e.g. around a crack tip) and to use a standard FE (e.g. FE_Q) elsewhere. This can be achieved by using the hp-finite element framework in deal.II, which allows for the use of different elements on different cells. To make the resulting space $C^0$ continuous, it is then necessary for the DoFHandler class and the DoFTools::make_hanging_node_constraints() function to be able to figure out what to do at the interface between enriched and non-enriched cells. Specifically, we want the degrees of freedom corresponding to enriched shape functions to be zero at these interfaces. These classes and functions cannot do this automatically, but the effect can be achieved by using not just a regular FE_Q on cells without enrichment, but by wrapping the FE_Q into an FE_Enriched object without actually enriching it. This can be done as follows:

    FE_Enriched<dim> fe_non_enriched(FE_Q<dim>(1));

    This constructor is equivalent to calling

    FE_Enriched<dim> fe_non_enriched(FE_Q<dim>(1),
                                     FE_Nothing<dim>(1, true),
                                     nullptr);
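
    To actually combine enriched and non-enriched elements on different cells, both would then typically be registered in an hp::FECollection and assigned per cell; a hedged sketch (fe_enriched is an FE_Enriched object as constructed above, and triangulation and the cell_is_enriched() predicate are assumed):

hp::FECollection<dim> fe_collection;
fe_collection.push_back(fe_enriched);     // index 0: enriched cells
fe_collection.push_back(fe_non_enriched); // index 1: all other cells

DoFHandler<dim> dof_handler(triangulation);
for (const auto &cell : dof_handler.active_cell_iterators())
  cell->set_active_fe_index(cell_is_enriched(cell) ? 0 : 1);
dof_handler.distribute_dofs(fe_collection);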
    @@ -3233,7 +3233,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3335,7 +3335,7 @@
    scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3625,8 +3625,8 @@
    component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial may have its support point physically located on a line bounding a cell, yet be nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3660,11 +3660,11 @@
    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes the nodal values of the element, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x) $ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then it returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html 2024-11-15 06:44:12.151517254 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP.html 2024-11-15 06:44:12.151517254 +0000 @@ -3240,7 +3240,7 @@
    [in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3342,7 +3342,7 @@
    scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3603,8 +3603,8 @@
    component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial may have its support point physically located on a line bounding a cell, yet be nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3638,11 +3638,11 @@
    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes the nodal values of the element, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $ f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x) $ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then it returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html 2024-11-15 06:44:12.283518433 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceP_3_011_00_01spacedim_01_4.html 2024-11-15 06:44:12.283518433 +0000 @@ -3449,7 +3449,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
component_mask: The mask that selects individual components of the finite element.
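A short sketch of this conversion (hypothetical driver code; the three-field FESystem is only an example in which every component happens to be its own block):

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/component_mask.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

using namespace dealii;

int main()
{
  // Two quadratic fields plus one linear field; each copy of a base
  // element forms its own block, so blocks and components coincide.
  const FESystem<2> fe(FE_Q<2>(2), 2, FE_Q<2>(1), 1);

  // Select the last component (say, a pressure) as a component mask ...
  const FEValuesExtractors::Scalar pressure(2);
  const ComponentMask component_mask = fe.component_mask(pressure);

  // ... and convert it into the corresponding block mask.
  const BlockMask block_mask = fe.block_mask(component_mask);
}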

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
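The classification can be queried as in the following sketch (hypothetical driver code):

#include <deal.II/base/geometry_info.h>
#include <deal.II/fe/fe_q.h>

using namespace dealii;

int main()
{
  const FE_Q<3> fe(2); // Q_2 element in 3d

  for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
    {
      // Vertex dofs report a vertex, edge-midpoint dofs a line,
      // face-center dofs a quad, and the cell-interior dof a hex.
      const GeometryPrimitive primitive =
        fe.get_associated_geometry_primitive(i);
      (void)primitive; // inspect, e.g., via primitive.get_dimension()
    }
}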

    Parameters
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$; that is, $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the value returned by the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html 2024-11-15 06:44:12.407519541 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ.html 2024-11-15 06:44:12.407519541 +0000 @@ -3287,7 +3287,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
component_mask: The mask that selects individual components of the finite element.

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html 2024-11-15 06:44:12.535520684 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__FaceQ_3_011_00_01spacedim_01_4.html 2024-11-15 06:44:12.535520684 +0000 @@ -3000,7 +3000,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
component_mask: The mask that selects individual components of the finite element.

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$; that is, $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the value returned by the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Hermite.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Hermite.html 2024-11-15 06:44:12.663521827 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Hermite.html 2024-11-15 06:44:12.667521863 +0000 @@ -493,8 +493,8 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Detailed Description

    template<int dim, int spacedim = dim>
class FE_Hermite< dim, spacedim >

    This class implements a Hermite interpolation basis of maximum regularity elements (see [CiarletRiavart1972interpolation]). These bases are always of odd polynomial degree, have regularity $r=\frac{p-1}{2}$ and are defined up to polynomial degree $p=13$, with larger degrees currently being ill-conditioned.


    Each node has $(r+1)^{d}$ degrees of freedom (DoFs) assigned to it, corresponding to various derivatives up to order $r$ in each direction. DoFs at each node are not consecutive in lexicographic ordering for $d>1$ due to the tensor product construction of the basis. The ordering is determined by the direction of the derivative each function corresponds to; first by $x$-derivatives, then $y$, then $z$. Locally over each element the DoFs are ordered similarly. See below for the local ordering for $r=1$, where DoFs are ordered from 0 to $(2r+2)^{d}-1$:

    FE_Hermite<1>(3)

(0)________________(2)
(1)                (3)
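For instance, the element drawn above can be constructed as in the following minimal sketch (hypothetical driver code; the dof counts in the comments follow the formulas given above):

#include <deal.II/fe/fe_hermite.h>

using namespace dealii;

int main()
{
  // Cubic Hermite basis in 1d: p = 3, hence regularity r = (p-1)/2 = 1,
  // i.e. function values and first derivatives are continuous.
  const FE_Hermite<1> fe(3);

  // Each vertex carries (r+1)^d = 2 dofs, ordered by derivative
  // direction as in the diagram: 0,1 on the left vertex, 2,3 on the right.
  const unsigned int n_dofs = fe.n_dofs_per_cell(); // == 4
  (void)n_dofs;
}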
     

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
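To spell out where the correction term comes from (a sketch using only the chain rule and the notation above):

\[
 \frac{d^2 \phi_i}{d x_j d x_k}
 = \frac{d}{d x_j}\left( \frac{d \phi_i}{d \hat x_K} (J_{kK})^{-1} \right)
 = \underbrace{\frac{d^2 \phi_i}{d \hat x_J d \hat x_K}
     (J_{jJ})^{-1} (J_{kK})^{-1}}_{D_{ijk}}
 + \frac{d \phi_i}{d \hat x_K} \frac{d (J_{kK})^{-1}}{d x_j},
\]

and the last term equals $-H_{mjk} \frac{d \phi_i}{d x_m}$ once the derivative of the inverse Jacobian is rewritten in terms of the pushed-forward Jacobian gradient $H$.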


    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivatives would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.


    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
component_mask: The mask that selects individual components of the finite element.

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$; that is, $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the value returned by the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

    Parameters
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
Variable storing the order of the highest derivative that the current FE_Hermite object can enforce continuity for. Note that the order of a derivative is counted per spatial direction, so, for example, the mixed derivative $\frac{d^{2}f}{dx \; dy}$ counts as a first-order derivative of $f$.


    Definition at line 262 of file fe_hermite.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html 2024-11-15 06:44:12.811523149 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nedelec.html 2024-11-15 06:44:12.811523149 +0000 @@ -532,12 +532,12 @@

    Detailed Description

    template<int dim>
    class FE_Nedelec< dim >
    Warning
    Several aspects of the implementation are experimental. For the moment, it is safe to use the element on globally refined meshes with consistent orientation of faces. See the todo entries below for more detailed caveats.

    Implementation of Nédélec elements. The Nédélec space is designed to solve problems in which the solution only lives in the space $H^\text{curl}=\{ {\mathbf u} \in L_2: \text{curl}\, {\mathbf u} \in L_2\}$, rather than in the more commonly used space $H^1=\{ u \in L_2: \nabla u \in L_2\}$. In other words, the solution must be a vector field whose curl is square integrable, but for which the gradient may not be square integrable. The typical application for this space (and these elements) is to the Maxwell equations and corresponding simplifications, such as the reduced version of the Maxwell equation that only involves the electric field $\mathbf E$ which has to satisfy the equation $\text{curl}\, \text{curl}\, {\mathbf E} = 0$ in the time independent case when no currents are present, or the equation $\text{curl}\,\text{curl}\,{\mathbf A} = 4\pi{\mathbf j}$ that the magnetic vector potential $\mathbf A$ has to satisfy in the time independent case.


    The defining characteristic of functions in $H^\text{curl}$ is that they are in general discontinuous – but that if you draw a line in 2d (or a surface in 3d), then the tangential component(s) of the vector field must be continuous across the line (or surface) even though the normal component may not be. As a consequence, the Nédélec element is constructed in such a way that (i) it is vector-valued, (ii) the shape functions are discontinuous, but (iii) the tangential component(s) of the vector field represented by each shape function are continuous across the faces of cells.

Other properties of the Nédélec element are that (i) it is not a primitive element; (ii) the shape functions are defined so that certain integrals over the faces are either zero or one, rather than the common case of certain point values being either zero or one.

We follow the commonly used – though confusing – definition of the "degree" of Nédélec elements. Specifically, the "degree" of the element denotes the polynomial degree of the largest complete polynomial subspace contained in the finite element space, even if the space may contain shape functions of higher polynomial degree. The lowest order element is consequently FE_Nedelec(0), i.e., the Nédélec element "of degree zero", even though the functions of this space are in general polynomials of degree one in each variable. This choice of "degree" implies that the approximation order of the function itself is degree+1, as with usual polynomial spaces. The numbering so chosen implies the sequence

\[
  Q_{k+1}
  \stackrel{\text{grad}}{\rightarrow}
  \text{Nedelec}_k
  \stackrel{\text{curl}}{\rightarrow}
  \text{RaviartThomas}_k
  \stackrel{\text{div}}{\rightarrow}
  DGQ_{k}
\]

    Note that this follows the convention of Brezzi and Raviart, though not the one used in the original paper by Nédélec.

    This class is not implemented for the codimension one case (spacedim != dim).
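As a small usage sketch (hypothetical driver code) illustrating the "degree" convention just described:

#include <deal.II/fe/fe_nedelec.h>

using namespace dealii;

int main()
{
  // Lowest-order Nedelec element in 3d: "degree" 0, yet the shape
  // functions are degree-one polynomials in each variable and the
  // field is approximated to order degree+1 = 1.
  const FE_Nedelec<3> fe(0);

  // One dof per edge of the hexahedron, tied to a tangential moment.
  const unsigned int n_dofs = fe.n_dofs_per_cell(); // == 12
  (void)n_dofs;
}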

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$; that is, $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. The fact that the element is based on generalized support points then implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the value returned by the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: if you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes the nodal values of the element, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

    Parameters
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
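
As an illustration, the following sketch fills such an array with the values of a constant vector field at the generalized support points of a Raviart-Thomas element and converts them to nodal values. The driver itself is hypothetical; the member calls shown are the documented interface:

#include <deal.II/base/point.h>
#include <deal.II/fe/fe_raviart_thomas.h>
#include <deal.II/lac/vector.h>

#include <vector>

int main()
{
  // A vector-valued element that is based on generalized support points.
  dealii::FE_RaviartThomas<2> fe(1);

  const auto &points = fe.get_generalized_support_points();

  // One value of a (here: constant) vector field at each generalized
  // support point; each entry has as many components as the element.
  std::vector<dealii::Vector<double>> support_point_values(
    points.size(), dealii::Vector<double>(fe.n_components()));
  for (dealii::Vector<double> &v : support_point_values)
    {
      v[0] = 1.0; // x-component of the interpolated field
      v[1] = 0.0; // y-component
    }

  // The nodal values Psi_i[f], i.e., the expansion coefficients of the
  // interpolant of the field in the basis of shape functions.
  std::vector<double> dof_values(fe.dofs_per_cell);
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, dof_values);
}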

Return a block mask with as many elements as this object has blocks, in which exactly the one entry that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
component_mask: The mask that selects individual components of the finite element
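
A minimal sketch of these two conversions, assuming a Stokes-like FESystem with a Raviart-Thomas velocity block and an FE_Q pressure block (so that the pressure is a complete block but a single velocity component is not):

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/component_mask.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_raviart_thomas.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

int main()
{
  // Two blocks: a vector-valued velocity (2 components) and a scalar pressure.
  dealii::FESystem<2> fe(dealii::FE_RaviartThomas<2>(1), 1,
                         dealii::FE_Q<2>(1), 1);

  // Selecting the scalar pressure component works: it is a complete block.
  const dealii::FEValuesExtractors::Scalar pressure(2);
  const dealii::BlockMask pressure_blocks = fe.block_mask(pressure);

  // Selecting only the x-velocity would fail: it is part of the larger
  // Raviart-Thomas block, so no block mask can represent it, and
  // fe.block_mask(FEValuesExtractors::Scalar(0)) would throw an exception.

  // Converting a component mask that covers whole blocks also works:
  const dealii::FEValuesExtractors::Vector velocities(0);
  const dealii::ComponentMask velocity_components =
    fe.component_mask(velocities);
  const dealii::BlockMask velocity_blocks =
    fe.block_mask(velocity_components);
}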

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
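
For illustration, a small sketch that queries this classification for every local degree of freedom of a $Q_2$ element in 3d. It assumes the member function carrying this documentation is FiniteElement::get_associated_geometry_primitive(), as in recent deal.II releases:

#include <deal.II/fe/fe_q.h>

#include <iostream>

int main()
{
  dealii::FE_Q<3> fe(2);

  // Classify each local DoF by the lowest-dimensional object it is
  // logically associated with: 0 = vertex, 1 = line, 2 = quad, 3 = hex.
  for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
    std::cout << "DoF " << i << " is associated with an object of dimension "
              << fe.get_associated_geometry_primitive(i).get_dimension()
              << std::endl;
}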
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html 2024-11-15 06:44:12.947524364 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ.html 2024-11-15 06:44:12.947524364 +0000 @@ -2985,7 +2985,7 @@

Return a block mask with as many elements as this object has blocks, in which exactly the one entry that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes the nodal values of the element, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html 2024-11-15 06:44:12.979524649 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__NedelecSZ_1_1InternalData.html 2024-11-15 06:44:12.979524649 +0000 @@ -160,9 +160,9 @@ class FE_NedelecSZ< dim, spacedim >::InternalData

Derived internal data used to store cell-independent data. Note that, due to the nature of this element, a number of useful pre-computed quantities are stored for the computation of cell-dependent shape functions.

    The main quantities which are stored are associated with edge and face parameterizations. These are:

• $\lambda_{i}$ - trilinear function, equal to one at the $i$-th vertex and zero at all other vertices.
• $\sigma_{i}$ - linear functional associated with the $i$-th vertex.

    The definitions of these functionals, as well as the edge and face parameterizations and edge and face extension parameters, can be found on page 82 of Zaglmayr's thesis. The details of the definition of the globally-defined edge and face orientations can be found on page 67.

[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Storage for all possible edge parameterizations between vertices. These are required in the computation of edge- and face-based DoFs, which are cell-dependent.

The edge parameterization of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\sigma_{E} = \sigma_{i} - \sigma_{j}$.

sigma_imj_values[q][i][j] stores the value of the edge parameterization for the edge connecting vertices $i$ and $j$ at the $q$-th quadrature point.

Note that not all of the $i$ and $j$ combinations result in valid edges on the hexahedral cell, but they are computed in this fashion for use with non-standard edge and face orientations.

    Definition at line 354 of file fe_nedelec_sz.h.
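
The indexing scheme is perhaps easiest to see in a standalone sketch. This is not the actual deal.II data member, only its documented [q][i][j] layout with hypothetical sizes:

#include <vector>

int main()
{
  const unsigned int n_q_points = 8; // hypothetical quadrature size
  const unsigned int n_vertices = 8; // vertices of a hexahedron

  // sigma_imj_values[q][i][j] = value of sigma_i - sigma_j at quadrature
  // point q, stored for every ordered vertex pair (i, j), valid edge or not.
  std::vector<std::vector<std::vector<double>>> sigma_imj_values(
    n_q_points,
    std::vector<std::vector<double>>(n_vertices,
                                     std::vector<double>(n_vertices, 0.)));

  sigma_imj_values[0][0][1] = -1.; // e.g., the pair (0, 1) at q = 0
}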


    Storage for gradients of all possible edge parameterizations between vertices. These are required in the computation of edge- and face-based DoFs, which are cell-dependent. Note that the components of the gradient are constant.

The edge parameterization of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\sigma_{E} = \sigma_{i} - \sigma_{j}$.

sigma_imj_grads[i][j][d] stores the gradient of the edge parameterization for the edge connecting vertices $i$ and $j$ in component $d$.

    Note that the gradient of the edge parameterization is constant on an edge, so we do not need to store it at every quadrature point.

    Definition at line 371 of file fe_nedelec_sz.h.


    Storage for edge extension parameters at quadrature points. These are stored for the 12 edges such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

The edge extension parameter of an edge, $E$, starting at vertex $i$ and ending at vertex $j$ is given by $\lambda_{E} = \lambda_{i} + \lambda_{j}$.

Note that under this definition, the values of $\lambda_{E}$ do not change with the orientation of the edge.

edge_lambda_values[m][q] stores the edge extension parameter value at the $q$-th quadrature point on edge $m$.

    Definition at line 414 of file fe_nedelec_sz.h.


Storage for gradients of edge extension parameters in 3d. In this case they are non-constant. These are stored for the 12 edges such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

edge_lambda_grads_3d[m][q][d] stores the gradient of the edge extension parameter for component $d$ at the $q$-th quadrature point on edge $m$.

    Definition at line 436 of file fe_nedelec_sz.h.


    Storage for the face extension parameters. These are stored for the 6 faces such that the global vertex numbering would follow the order defined by the "standard" deal.II cell.

The face extension parameter of a face, $F$, defined by the vertices v1, v2, v3, v4 is given by $\lambda_{F} = \lambda_{v1} + \lambda_{v2} + \lambda_{v3} + \lambda_{v4}$.

Note that under this definition, the values of $\lambda_{F}$ do not change with the orientation of the face.

face_lambda_values[m][q] stores the face extension parameter value at the $q$-th quadrature point on face $m$.

    Definition at line 466 of file fe_nedelec_sz.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html 2024-11-15 06:44:13.099525721 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Nothing.html 2024-11-15 06:44:13.103525757 +0000 @@ -482,7 +482,7 @@ class FE_Nothing< dim, spacedim >

    Definition of a finite element space with zero degrees of freedom and that, consequently, can only represent a single function: the zero function.

    This class is useful (in the context of an hp-method) to represent empty cells in the triangulation on which no degrees of freedom should be allocated, or to describe a field that is extended by zero to a part of the domain where we don't need it. Thus a triangulation may be divided into two regions: an active region where normal elements are used, and an inactive region where FE_Nothing elements are used. The DoFHandler will therefore assign no degrees of freedom to the FE_Nothing cells, and this subregion is therefore implicitly deleted from the computation. step-10 and step-46 show use cases for this element. An interesting application for this element is also presented in the paper [Cangiani2012].

    FE_Nothing as seen as a function space

Finite elements are often best interpreted as forming a function space, i.e., a set of functions that form a vector space. One can indeed interpret FE_Nothing in this light: It corresponds to the function space $V_h=\{0\}$, i.e., the set of functions that are zero everywhere. (The constructor can take an argument that, if greater than one, extends the space to one of vector-valued functions with more than one component, with all components equal to zero everywhere.) Indeed, this is a vector space since every linear combination of elements in the vector space is also an element in the vector space, as is every multiple of the single element zero. It is obvious that the function space has no degrees of freedom, thus the name of the class.

    FE_Nothing in combination with other elements

    In situations such as those of step-46, one uses FE_Nothing on cells where one is not interested in a solution variable. For example, in fluid structure interaction problems, the fluid velocity is only defined on cells inside the fluid part of the domain. One then uses FE_Nothing on cells in the solid part of the domain to describe the finite element space for the velocity. In other words, the velocity lives everywhere conceptually, but it is identically zero in those parts of the domain where it is not of interest and doesn't use up any degrees of freedom there.

    The question is what happens at the interface between areas where one is interested in the solution (and uses a "normal" finite element) and where one is not interested (and uses FE_Nothing): Should the solution at that interface be zero – i.e., we consider a "continuous" finite element field that happens to be zero in that area where FE_Nothing is used – or is there no requirement for continuity at the interface. In the deal.II language, this is encoded by what the function FiniteElement::compare_for_domination() returns: If the FE_Nothing "dominates", then the solution must be zero at the interface; if it does not, then there is no requirement and one can think of FE_Nothing as a function space that is in general discontinuous (i.e., there is no requirement for any kind of continuity at cell interfaces) but on every cell equal to zero.
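
A minimal sketch of the usual usage pattern: an hp::FECollection pairing a "normal" element with FE_Nothing, so that each cell of a DoFHandler can later be assigned one or the other:

#include <deal.II/fe/fe_nothing.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/hp/fe_collection.h>

int main()
{
  dealii::hp::FECollection<2> fe_collection;
  fe_collection.push_back(dealii::FE_Q<2>(1));      // the active region
  fe_collection.push_back(dealii::FE_Nothing<2>()); // the inactive region
  // Cells assigned the second index receive no degrees of freedom.
}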


Return a block mask with as many elements as this object has blocks, in which exactly the one entry that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes the nodal values of the element, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html 2024-11-15 06:44:13.227526865 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__P1NC.html 2024-11-15 06:44:13.231526900 +0000 @@ -489,13 +489,13 @@

    Detailed Description

    Implementation of the scalar version of the P1 nonconforming finite element, a piecewise linear element on quadrilaterals in 2d. This implementation is only for 2d cells in a 2d space (i.e., codimension 0).

Unlike the usual continuous, $H^1$ conforming finite elements, the P1 nonconforming element does not enforce continuity across edges. However, it requires continuity in an integral sense: any function in the space should have the same integral value on the two sides of the common edge shared by two adjacent elements.

Thus, each function in the nonconforming element space can be discontinuous, and consequently not included in $H^1_0$, just like the basis functions in Discontinuous Galerkin (DG) finite element spaces. On the other hand, basis functions in DG spaces are completely discontinuous across edges, without any relation between the values from both sides. This is a reason why usual weak formulations for DG schemes contain additional penalty terms for the jumps across edges to control discontinuity. However, nonconforming elements usually do not need additional terms in their weak formulations because their integrals along edges are the same from both sides, i.e., there is some level of continuity.

    Dice Rule

Since any function in the P1 nonconforming space is piecewise linear on each element, the function value at the midpoint of each edge is the same as the mean value on the edge. Thus the continuity of the integral value across each edge is equivalent to the continuity of the midpoint value of each edge in this case.

    Thus for the P1 nonconforming element, the function values at midpoints on edges of a cell are important. The first attempt to define (local) degrees of freedom (DoFs) on a quadrilateral is by using midpoint values of a function.

However, these 4 functionals are not linearly independent because a linear function in 2d is uniquely determined by only 3 independent values. A simple observation is that any linear function on a quadrilateral satisfies the 'dice rule': the sum of the two function values at the midpoints of one pair of opposite edges is equal to the sum of those at the midpoints of the other pair. This is called the 'dice rule' because the number of pips on opposite faces of a die always adds up to the same number as well (in the case of dice, to seven).

In formulas, the dice rule is written as $\phi(m_0) + \phi(m_1) = \phi(m_2) + \phi(m_3)$ for all $\phi$ in the function space, where $m_j$ is the midpoint of the edge $e_j$. Here, we assume the standard numbering convention for edges used in deal.II and described in class GeometryInfo.

Conversely, if 4 values at the midpoints satisfying the dice rule are given, then there always exists a unique linear function which coincides with the 4 midpoint values.

Due to the dice rule, three values at any three midpoints determine the value at the last midpoint. This means that the number of independent local functionals on a cell is 3, which is also the dimension of the space of linear polynomials on a cell in 2d.
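
The dice rule is easy to verify directly. A standalone sketch, assuming the unit reference cell and deal.II's left/right/bottom/top edge order (both sides of the identity equal $a + b + 2c$ for $\phi(x,y) = ax + by + c$):

#include <cassert>

int main()
{
  const double a = 2.0, b = -3.0, c = 0.5;
  const auto phi = [&](double x, double y) { return a * x + b * y + c; };

  // Edge midpoints of the unit cell in deal.II order:
  // e_0: left (0, 0.5), e_1: right (1, 0.5),
  // e_2: bottom (0.5, 0), e_3: top (0.5, 1).
  const double m[4][2] = {{0.0, 0.5}, {1.0, 0.5}, {0.5, 0.0}, {0.5, 1.0}};

  // Opposite-edge pairs: (e_0, e_1) and (e_2, e_3).
  const double lhs = phi(m[0][0], m[0][1]) + phi(m[1][0], m[1][1]);
  const double rhs = phi(m[2][0], m[2][1]) + phi(m[3][0], m[3][1]);
  assert(lhs == rhs); // the dice rule for an arbitrary linear function
  (void)lhs;
  (void)rhs;
}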

    Shape functions

(ASCII diagram of the reference quadrilateral, with vertices 0 and 1 along the bottom edge and the edge midpoints marked, omitted here.)

For each vertex $v_j$ of a given cell, there are two edges of which $v_j$ is one of the end points. Consider a linear function such that it has value 0.5 at the midpoints of the two adjacent edges, and 0.0 at the midpoints of the other two edges. Note that the set of these values satisfies the dice rule described above. We denote such a function associated with vertex $v_j$ by $\phi_j$. Then the set of 4 shape functions is a partition of unity on a cell: $\sum_{j=0}^{3} \phi_j = 1$. (This is easy to see: at each edge midpoint, the sum of the four functions adds up to one because two functions have value 0.5 and the other two value 0.0. Because the function is globally linear, the only function that can have value 1 at four points must also be globally equal to one.)

The following figures represent $\phi_j$ for $j=0,\cdots,3$ with their midpoint values:

• shape function $\phi_0$: ASCII diagram with its midpoint values (top edge midpoint value 0.0; diagram omitted here)
• shape function $\phi_1$: ASCII diagram with its midpoint values (top edge midpoint value 0.0; diagram omitted here)
• shape function $\phi_2$: ASCII diagram with its midpoint values (top edge midpoint value 0.5; diagram omitted here)
• shape function $\phi_3$: ASCII diagram with its midpoint values (top edge midpoint value 0.5; diagram omitted here)
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
Return the coefficients of the 4 local linear shape functions $\phi_j(x,y) = a x + b y + c$ on a given cell. For each local shape function, the array consists of three coefficients, in the order $a$, $b$, $c$.

    Definition at line 88 of file fe_p1nc.cc.


Return a block mask with as many elements as this object has blocks, in which exactly the one entry that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero on only one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
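To make this classification concrete, the following minimal sketch tabulates it for a $Q_2$ element in 3d. It assumes the function described here is FiniteElement::get_associated_geometry_primitive() and that the usual deal.II headers are available; treat it as an illustration, not as part of the documentation above.

#include <deal.II/base/geometry_info.h>
#include <deal.II/fe/fe_q.h>

#include <iostream>

int main()
{
  using namespace dealii;

  // Q2 element in 3d: DoFs live on vertices, edge midpoints,
  // face centers, and the cell center.
  const FE_Q<3> fe(2);

  for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
    {
      const GeometryPrimitive primitive =
        fe.get_associated_geometry_primitive(i);

      // GeometryPrimitive converts to its enum value, so we can
      // compare against the enumerators directly.
      if (primitive == GeometryPrimitive::vertex)
        std::cout << "DoF " << i << " -> vertex\n";
      else if (primitive == GeometryPrimitive::line)
        std::cout << "DoF " << i << " -> line (edge midpoint)\n";
      else if (primitive == GeometryPrimitive::quad)
        std::cout << "DoF " << i << " -> quad (face center)\n";
      else
        std::cout << "DoF " << i << " -> hex (cell interior)\n";
    }
}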
    @@ -3373,11 +3373,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where the $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

    Parameters
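As a usage sketch of the behavior just described (the element choice, the sample function, and the call to FiniteElement::convert_generalized_support_point_values_to_dof_values() are illustrative assumptions): for a Lagrange element, the node functionals reduce to point evaluations, so the returned nodal values are simply the function values at the support points.

#include <deal.II/base/point.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/lac/vector.h>

#include <vector>

int main()
{
  using namespace dealii;

  const FE_Q<2> fe(1); // scalar bilinear element: four support points

  const std::vector<Point<2>> &points = fe.get_generalized_support_points();

  // Values of f(x,y) = x + 2y at the generalized support points;
  // each entry is a vector with one value per element component.
  std::vector<Vector<double>> support_point_values(
    points.size(), Vector<double>(fe.n_components()));
  for (unsigned int i = 0; i < points.size(); ++i)
    support_point_values[i][0] = points[i][0] + 2. * points[i][1];

  // Apply the node functionals Psi_i. For this element they are
  // point evaluations, so nodal_values[i] == f(x_i).
  std::vector<double> nodal_values(fe.n_dofs_per_cell());
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, nodal_values);
}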
/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html 2024-11-15 06:44:13.359528043 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Poly.html 2024-11-15 06:44:13.359528043 +0000
@@ -1420,17 +1420,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
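Written out as code, the correction is a contraction over the repeated index $m$. The sketch below uses hypothetical array types and is not the library's internal implementation; the third-derivative correction that follows applies the same pattern with additional terms.

#include <array>

// Schematic: for one shape function at one quadrature point, subtract
// the Jacobian pushed-forward term from the uncorrected Hessian:
//   corrected_{jk} = D_{jk} - H_{mjk} grad_m   (summed over m).
template <int dim>
std::array<std::array<double, dim>, dim> correct_hessian(
  const std::array<std::array<double, dim>, dim>                  &D,
  const std::array<std::array<std::array<double, dim>, dim>, dim> &H,
  const std::array<double, dim>                                   &grad)
{
  std::array<std::array<double, dim>, dim> corrected = D;
  for (int m = 0; m < dim; ++m)
    for (int j = 0; j < dim; ++j)
      for (int k = 0; k < dim; ++k)
        corrected[j][k] -= H[m][j][k] * grad[m];
  return corrected;
}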

    @@ -1465,21 +1465,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivatives would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3013,7 +3013,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one entry that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar component you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3115,7 +3115,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
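Both conversions discussed above can be sketched as follows; the Stokes-like element and the extractor indices are assumptions chosen for illustration.

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/component_mask.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_raviart_thomas.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

int main()
{
  using namespace dealii;

  // Two blocks: a Raviart-Thomas velocity (two components in 2d,
  // forming a single block) followed by a scalar Q1 pressure.
  const FESystem<2> fe(FE_RaviartThomas<2>(1), 1, FE_Q<2>(1), 1);

  // Selecting the scalar pressure (component 2) works: it is a
  // block of its own.
  const FEValuesExtractors::Scalar pressure(2);
  const BlockMask pressure_block = fe.block_mask(pressure);

  // Converting a component mask that covers the complete velocity
  // block (components 0 and 1) also works.
  const FEValuesExtractors::Vector velocities(0);
  const ComponentMask velocity_components = fe.component_mask(velocities);
  const BlockMask     velocity_block      = fe.block_mask(velocity_components);

  // In contrast, selecting only the x velocity (component 0) would
  // trigger the exception described in the notes above:
  //   fe.block_mask(FEValuesExtractors::Scalar(0)); // would abort
}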
    @@ -3405,8 +3405,8 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as to the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, not only on the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero on only one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3440,11 +3440,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where the $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

    Parameters
/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html 2024-11-15 06:44:13.495529258 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyFace.html 2024-11-15 06:44:13.499529294 +0000
@@ -2973,7 +2973,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one entry that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar component you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3081,7 +3081,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3391,8 +3391,8 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as to the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, not only on the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero on only one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3428,11 +3428,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where the $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

    Parameters
/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html 2024-11-15 06:44:13.627530437 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PolyTensor.html 2024-11-15 06:44:13.631530473 +0000
@@ -509,12 +509,12 @@

Similarly, in many cases, node functionals depend on the shape of the mesh cell, since they evaluate normal or tangential components on the faces. In order to allow for a set of transformations, the variable mapping_kind has been introduced. It needs to be set in the constructor of a derived class.

Any derived class must decide on the polynomial space to use. This polynomial space should be implemented simply as a set of vector-valued polynomials like PolynomialsBDM and PolynomialsRaviartThomas. In order to facilitate this implementation, which basis the polynomial space chooses is of no importance to the current class – as described next, this class handles the transformation from the basis chosen by the polynomial space template argument to the basis we want to use for finite element computations internally.

    Determining the correct basis

In most cases, the basis used by the class that describes the polynomial space, $\{\tilde\varphi_j(\hat{\mathbf x})\}$, does not match the one we want to use for the finite element description, $\{\varphi_j(\hat{\mathbf x})\}$. Rather, we need to express the finite element shape functions as a linear combination of the basis provided by the polynomial space:

\begin{align*}
  \varphi_j = \sum_k c_{jk} \tilde\varphi_k.
\end{align*}

These expansion coefficients $c_{jk}$ are typically computed in the constructors of derived classes. To facilitate this, this class at first (unless told otherwise, see below) assumes that the shape functions should be exactly the ones provided by the polynomial space. In the constructor of the derived class, one then typically has code of the form

// Now compute the inverse node matrix, generating the correct
// basis functions from the raw ones. For a discussion of what
// exactly happens here, see FETools::compute_node_matrix.
const FullMatrix<double> M = FETools::compute_node_matrix(*this);
this->inverse_node_matrix.reinit(this->n_dofs_per_cell(),
                                 this->n_dofs_per_cell());
this->inverse_node_matrix.invert(M);
The FETools::compute_node_matrix() function explains in more detail what exactly it computes, and how; in any case, the result is that inverse_node_matrix now contains the expansion coefficients $c_{jk}$, and the fact that this block of code now sets the matrix to a non-zero size indicates to the functions of the current class that, from then on, they should use the expanded basis, $\{\varphi_j(\hat{\mathbf x})\}$, and no longer the original, "raw" basis $\{\tilde\varphi_j(\hat{\mathbf x})\}$, when asked for values or derivatives of shape functions.

    In order for this scheme to work, it is important to ensure that the size of the inverse_node_matrix be zero at the time when FETools::compute_node_matrix() is called; thus, the call to this function cannot be inlined into the last line – the result of the call really does need to be stored in the temporary object M.

    Setting the transformation

In most cases, vector-valued basis functions must be transformed when mapped from the reference cell to the actual grid cell. These transformations can be selected from the set MappingKind and stored in mapping_kind. Therefore, each constructor should contain a line like:
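The code line itself was cut off by the diff; as a sketch, such a line could look like the following, where the choice mapping_none is an assumption suitable only for elements whose shape function values need no transformation.

// In the constructor of a derived class: one mapping kind applied
// to all components. mapping_none is an assumption for this sketch;
// a Raviart-Thomas element, for example, would instead use
// mapping_raviart_thomas.
this->mapping_kind = {mapping_none};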

    @@ -2912,7 +2912,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one entry that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar component you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3014,7 +3014,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block, and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3304,8 +3304,8 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as to the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge, not only on the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero on only one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3339,11 +3339,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where the $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

    Parameters
/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html 2024-11-15 06:44:13.767531688 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidDGP.html 2024-11-15 06:44:13.767531688 +0000
@@ -714,11 +714,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where the $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ depends only on the values of $\varphi$ at $\hat{\mathbf x}_i$, and not on values anywhere else, on integrals of $\varphi$, or on any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$, where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$, where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$, then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$, where $N$ equals dofs_per_cell.

    Parameters
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
    @@ -1735,17 +1735,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -1778,21 +1778,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivatives would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
- H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
- H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.
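The analogous schematic sketch for the third-derivative correction (again an illustration under the same assumptions, not the library's code) subtracts three Hessian terms and one gradient term:

constexpr int dim = 3;

// D3[j][k][l]: uncorrected third derivatives D_{ijkl} of one shape
// function; D2[j][k]: its corrected Hessian; H and K: the Jacobian
// pushed-forward first and second derivatives; grad[m]: d phi_i / d x_m.
void correct_third_derivatives(double D3[dim][dim][dim],
                               const double D2[dim][dim],
                               const double H[dim][dim][dim],
                               const double K[dim][dim][dim][dim],
                               const double grad[dim])
{
  for (int j = 0; j < dim; ++j)
    for (int k = 0; k < dim; ++k)
      for (int l = 0; l < dim; ++l)
        for (int m = 0; m < dim; ++m)
          D3[j][k][l] -= H[m][j][l] * D2[k][m] + H[m][k][l] * D2[j][m] +
                         H[m][j][k] * D2[l][m] + K[m][j][k][l] * grad[m];
}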

    @@ -3268,7 +3268,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one component that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3370,7 +3370,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
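A minimal sketch of this conversion (assuming deal.II; the Stokes-like FESystem is a hypothetical example in which the velocity components make up complete blocks):

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/component_mask.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>

int main()
{
  // Three vector components (2 velocities + 1 pressure) distributed
  // over three blocks, one per base-element copy.
  dealii::FESystem<2> fe(dealii::FE_Q<2>(2), 2, dealii::FE_Q<2>(1), 1);

  // Select the two velocity components...
  const dealii::FEValuesExtractors::Vector velocities(0);
  const dealii::ComponentMask component_mask = fe.component_mask(velocities);

  // ...and convert to a block mask. This succeeds here because the
  // selected components together cover complete blocks.
  const dealii::BlockMask block_mask = fe.block_mask(component_mask);
}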
    @@ -3660,8 +3660,8 @@
component_mask: The mask that selects individual components of the finite element.

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
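A short usage sketch (assuming deal.II; the $Q_2$-in-3d element mirrors the example above):

#include <deal.II/base/geometry_info.h>
#include <deal.II/fe/fe_q.h>

#include <iostream>

int main()
{
  dealii::FE_Q<3> fe(2); // Q_2 element in 3d

  for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
    switch (fe.get_associated_geometry_primitive(i))
      {
        case dealii::GeometryPrimitive::vertex:
          std::cout << i << ": vertex\n";
          break;
        case dealii::GeometryPrimitive::line:
          std::cout << i << ": line\n";
          break;
        case dealii::GeometryPrimitive::quad:
          std::cout << i << ": quad\n";
          break;
        case dealii::GeometryPrimitive::hex:
          std::cout << i << ": hex\n";
          break;
      }
}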

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html 2024-11-15 06:44:13.895532831 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidP.html 2024-11-15 06:44:13.895532831 +0000 @@ -851,11 +851,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$; in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -1872,17 +1872,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -1915,21 +1915,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivatives would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
- H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
- H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3292,7 +3292,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one component that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3394,7 +3394,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3684,8 +3684,8 @@
component_mask: The mask that selects individual components of the finite element.

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html 2024-11-15 06:44:14.027534009 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__PyramidPoly.html 2024-11-15 06:44:14.031534045 +0000 @@ -658,11 +658,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$; in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -1685,17 +1685,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -1728,21 +1728,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivatives would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
- H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
- H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3276,7 +3276,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one component that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3378,7 +3378,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3668,8 +3668,8 @@
component_mask: The mask that selects individual components of the finite element.

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html 2024-11-15 06:44:14.167535260 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q.html 2024-11-15 06:44:14.171535296 +0000 @@ -508,7 +508,7 @@

    The constructor creates a TensorProductPolynomials object that includes the tensor product of LagrangeEquidistant polynomials of degree p. This TensorProductPolynomials object provides all values and derivatives of the shape functions. In case a quadrature rule is given, the constructor creates a TensorProductPolynomials object that includes the tensor product of Lagrange polynomials with the support points from points.

    Furthermore the constructor fills the interface_constraints, the prolongation (embedding) and the restriction matrices. These are implemented only up to a certain degree and may not be available for very high polynomial degree.

    Unit support point distribution and conditioning of interpolation

When constructing an FE_Q element at polynomial degrees one or two, equidistant support points at 0 and 1 (linear case) or 0, 0.5, and 1 (quadratic case) are used. The unit support or nodal points $x_i$ are those points where the $j$th Lagrange polynomial satisfies the $\delta_{ij}$ property, i.e., where one polynomial is one and all the others are zero. For higher polynomial degrees, the support points are non-equidistant by default, and chosen to be the support points of the (degree+1)-order Gauss-Lobatto quadrature rule. This point distribution yields well-conditioned Lagrange interpolation at arbitrary polynomial degrees. By contrast, polynomials based on equidistant points get increasingly ill-conditioned as the polynomial degree increases. In interpolation, this effect is known as the Runge phenomenon. For Galerkin methods, the Runge phenomenon is typically not visible in the solution quality but rather in the condition number of the associated system matrices. For example, the elemental mass matrix of equidistant points at degree 10 has condition number 2.6e6, whereas the condition number for Gauss-Lobatto points is around 400.

    The Gauss-Lobatto points in 1d include the end points 0 and +1 of the unit interval. The interior points are shifted towards the end points, which gives a denser point distribution close to the element boundary.

    If combined with Gauss-Lobatto quadrature, FE_Q based on the default support points gives diagonal mass matrices. This case is demonstrated in step-48. However, this element can be combined with arbitrary quadrature rules through the usual FEValues approach, including full Gauss quadrature. In the general case, the mass matrix is non-diagonal.
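As a sketch (assuming deal.II), the support points can be made explicit by passing the Gauss-Lobatto quadrature to the constructor; pairing it with a matching quadrature for integration yields the diagonal mass matrix mentioned above:

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/fe/fe_q.h>

int main()
{
  const unsigned int degree = 4;

  // Gauss-Lobatto support points (also the default for FE_Q at higher
  // degrees, but stated explicitly here).
  dealii::FE_Q<3> fe(dealii::QGaussLobatto<1>(degree + 1));

  // A matching Gauss-Lobatto quadrature, e.g. for use with FEValues;
  // together with 'fe' this produces a diagonal mass matrix.
  dealii::QGaussLobatto<3> quadrature(degree + 1);
}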

    Numbering of the degrees of freedom (DoFs)

[Figure table: plots of the $Q_2$ element's shape functions 0–8.]

[Figure table: plots of the $Q_4$ element's shape functions 0–21.]

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html 2024-11-15 06:44:14.303536474 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Base.html 2024-11-15 06:44:14.307536510 +0000 @@ -2265,17 +2265,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2308,21 +2308,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivatives would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
- H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
- H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3486,7 +3486,7 @@

Return a block mask with as many elements as this object has blocks, in which exactly the one component that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3588,7 +3588,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3849,8 +3849,8 @@
component_mask: The mask that selects individual components of the finite element.

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3884,11 +3884,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$; in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html 2024-11-15 06:44:14.439537689 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Bubbles.html 2024-11-15 06:44:14.439537689 +0000 @@ -507,17 +507,17 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Detailed Description

    template<int dim, int spacedim = dim>
class FE_Q_Bubbles< dim, spacedim >

Implementation of a scalar Lagrange finite element $Q_p^+$ that yields the finite element space of continuous, piecewise polynomials of degree $p$ in each coordinate direction, plus a (non-normalized) bubble enrichment space spanned by the additional shape functions $\varphi_j(\mathbf x) = 2^{p-1}\left(x_j-\frac 12\right)^{p-1} \left[\prod_{i=0}^{dim-1}(x_i(1-x_i))\right]$ for $j=0,\ldots,dim-1$. If $p$ is one, the first factor disappears and one obtains the usual bubble function centered at the mid-point of the cell. Because these last shape functions have polynomial degree $p+1$, the overall polynomial degree of the shape functions in the space described by this class is $p+1$.

    This class is realized using tensor product polynomials based on equidistant or given support points, in the same way as one can provide support points to the FE_Q class's constructors.

    For more information about the spacedim template parameter check the documentation of the FiniteElement class, or the one of Triangulation.

Due to the fact that the enrichments are small almost everywhere for large $p$, the condition number for the mass and stiffness matrices quickly increases with increasing $p$. Below you see a comparison with FE_Q(QGaussLobatto(p+1)) for dim=1.

Therefore, this element should be used with care for $p>3$.

    Implementation

The constructor creates a TensorProductPolynomialsBubbles object that includes the tensor product of LagrangeEquidistant polynomials of degree p plus the bubble enrichments. This object provides all values and derivatives of the shape functions. In case a quadrature rule is given, the constructor creates a TensorProductPolynomialsBubbles object that includes the tensor product of Lagrange polynomials with the support points taken from points, plus the bubble enrichments as defined above.

Furthermore, the constructor fills the interface_constraints, the prolongation (embedding) and the restriction matrices.
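
As an illustration of the two construction paths just described, a minimal sketch (assumed usage, not from the original page; the degree and the quadrature rule are arbitrary choices):

  #include <deal.II/base/quadrature_lib.h>
  #include <deal.II/fe/fe_q_bubbles.h>

  #include <iostream>

  int main()
  {
    // Equidistant support points of degree p = 2, plus the bubble enrichments.
    dealii::FE_Q_Bubbles<2> fe_equidistant(2);

    // The same kind of element, but with support points taken from a 1d
    // quadrature rule (here Gauss-Lobatto), just as for FE_Q.
    dealii::FE_Q_Bubbles<2> fe_gl(dealii::QGaussLobatto<1>(3));

    std::cout << fe_equidistant.get_name() << " has "
              << fe_equidistant.dofs_per_cell << " DoFs per cell\n"
              << fe_gl.get_name() << " has "
              << fe_gl.dofs_per_cell << " DoFs per cell\n";
  }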

    @@ -736,11 +736,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -2459,17 +2459,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
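
(As a sanity check, not part of the original page: the correction term is just the product rule. Differentiating the pushed-forward gradient $\frac{d \phi_i}{d x_j} = \frac{d \hat\phi_i}{d \hat x_J} (J_{jJ})^{-1}$ once more with respect to $x_k$ gives

\[
 \frac{d^2 \phi_i}{d x_j d x_k}
 = \underbrace{\frac{d^2 \hat\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1}}_{D_{ijk}}
 + \frac{d \hat\phi_i}{d \hat x_J} \frac{d (J_{jJ})^{-1}}{d x_k},
\]

and with $d(J^{-1}) = -J^{-1}\,(dJ)\,J^{-1}$ the second term reduces to $-H_{mjk} \frac{d \phi_i}{d x_m}$, which is exactly the term subtracted above.)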

    @@ -2502,21 +2502,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3593,7 +3593,7 @@
[in] support_point_values : An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one block that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3695,7 +3695,7 @@
scalar : An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
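
For illustration, a hypothetical sketch (the Stokes-like FESystem is an assumption chosen for demonstration) showing both the extractor-based variant and the ComponentMask conversion described here:

  #include <deal.II/fe/block_mask.h>
  #include <deal.II/fe/component_mask.h>
  #include <deal.II/fe/fe_q.h>
  #include <deal.II/fe/fe_system.h>
  #include <deal.II/fe/fe_values_extractors.h>

  #include <iostream>

  int main()
  {
    // A Stokes-like element: two velocity components and one pressure.
    // Each base-element copy forms its own block here.
    dealii::FESystem<2> fe(dealii::FE_Q<2>(2), 2, dealii::FE_Q<2>(1), 1);

    // Variant 1: from a scalar extractor. Selecting the pressure
    // (component 2) succeeds because it encompasses a complete block.
    const dealii::FEValuesExtractors::Scalar pressure(2);
    const dealii::BlockMask pressure_blocks = fe.block_mask(pressure);

    // Variant 2: conversion from a ComponentMask, as described above; the
    // selected components must again add up to complete blocks.
    const dealii::ComponentMask pressure_components = fe.component_mask(pressure);
    const dealii::BlockMask same_blocks = fe.block_mask(pressure_components);

    std::cout << "selected blocks: " << pressure_blocks.n_selected_blocks()
              << " and " << same_blocks.n_selected_blocks() << '\n';
  }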
    Parameters
    @@ -3956,8 +3956,8 @@
component_mask : The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
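
To see the classification in action, a minimal sketch (assumed usage; the $Q_2$-in-3d element matches the example above):

  #include <deal.II/base/geometry_info.h>
  #include <deal.II/fe/fe_q.h>

  #include <iostream>

  int main()
  {
    // Q2 in 3d has vertex, line, quad and hex degrees of freedom.
    dealii::FE_Q<3> fe(2);

    for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
      {
        const dealii::GeometryPrimitive primitive =
          fe.get_associated_geometry_primitive(i);
        // get_dimension(): 0 = vertex, 1 = line, 2 = quad, 3 = hex.
        std::cout << "dof " << i << " -> object of dimension "
                  << primitive.get_dimension() << '\n';
      }
  }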

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html 2024-11-15 06:44:14.571538868 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__DG0.html 2024-11-15 06:44:14.571538868 +0000 @@ -909,11 +909,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -2636,17 +2636,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2679,21 +2679,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3770,7 +3770,7 @@
[in] support_point_values : An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one block that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3872,7 +3872,7 @@
scalar : An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4133,8 +4133,8 @@
component_mask : The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html 2024-11-15 06:44:14.719540190 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__Hierarchical.html 2024-11-15 06:44:14.719540190 +0000 @@ -3219,17 +3219,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -3264,21 +3264,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -4658,7 +4658,7 @@

Return a block mask with as many elements as this object has blocks, in which exactly the one block that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -4760,7 +4760,7 @@
scalar : An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -5021,8 +5021,8 @@
component_mask : The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but that is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -5056,11 +5056,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html 2024-11-15 06:44:14.851541369 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__Q__iso__Q1.html 2024-11-15 06:44:14.855541405 +0000 @@ -504,14 +504,14 @@
    template<int dim, int spacedim = dim>
    class FE_Q_iso_Q1< dim, spacedim >

Implementation of a scalar Lagrange finite element $Q_p$-iso-$Q_1$ that defines the finite element space of continuous, piecewise linear elements with $p$ subdivisions in each coordinate direction. It yields an element with the same number of degrees of freedom as the $Q_p$ elements, but using linear interpolation instead of a higher-order one. In other words, on every cell, the shape functions are not higher-order polynomials interpolating a set of node points, but are piecewise (bi-, tri-)linear within the cell while interpolating the same set of node points. This type of element is also called a macro element in the literature, as it can be seen as consisting of several smaller elements, namely $p^{dim}$ such sub-cells.

    The numbering of degrees of freedom is done in exactly the same way as in FE_Q of degree p. See there for a detailed description on how degrees of freedom are numbered within one element.
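
A minimal sketch (assumed usage, not from the original page) that confirms the element has exactly as many degrees of freedom per cell as FE_Q of the same degree:

  #include <deal.II/fe/fe_q.h>
  #include <deal.II/fe/fe_q_iso_q1.h>

  #include <iostream>

  int main()
  {
    const unsigned int p = 3;

    dealii::FE_Q<2>        fe_q(p);   // one polynomial space of degree p
    dealii::FE_Q_iso_Q1<2> fe_iso(p); // p^2 piecewise (bi)linear sub-cells

    // Both elements have the same number of DoFs per cell, numbered the
    // same way.
    std::cout << fe_q.get_name() << ": " << fe_q.dofs_per_cell << " DoFs\n"
              << fe_iso.get_name() << ": " << fe_iso.dofs_per_cell
              << " DoFs\n";
  }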

This element represents a Q-linear finite element space on a reduced mesh of size $h/p$. Its effect is equivalent to using FE_Q of degree one on a mesh finer by a factor $p$ if an equivalent quadrature is used. However, this element reduces the flexibility in the choice of (adaptive) mesh size by exactly this factor $p$, which typically reduces efficiency. On the other hand, comparing this element with $p$ subdivisions to the FE_Q element of degree $p$ on the same mesh shows that the convergence is typically much worse for smooth problems. In particular, $Q_p$ elements achieve interpolation orders of $h^{p+1}$ in the $L_2$ norm, whereas these elements reach only $(h/p)^2$. For these two reasons, this element is usually not very useful as a standalone. In addition, any evaluation of face terms on the boundaries within the elements becomes impossible with this element because deal.II does not have the equivalent of FEFaceValues for lower-dimensional integrals in the interior of cells.

    Nonetheless, there are a few use cases where this element actually is useful:

1. Systems of PDEs where certain variables demand higher resolution than the others, and the additional degrees of freedom should be spent on increasing the resolution of linears instead of higher-order polynomials, and you do not want to use two different meshes for the different components. This can be the case when irregularities (shocks) appear in the solution and stabilization techniques are used that work for linears but not higher-order elements.

2. Stokes/Navier Stokes systems such as the one discussed in step-22 could be solved with Q2-iso-Q1 elements for velocities instead of $Q_2$ elements. Combined with $Q_1$ pressures they give a stable mixed element pair. However, they perform worse than the standard (Taylor-Hood $Q_2\times Q_1$) approach in most situations. (See, for example, [Boffi2011].) This combination of subdivided elements for the velocity and non-subdivided elements for the pressure is sometimes called the "Bercovier-Pironneau element" and dates back to around the same time as the Taylor-Hood element (namely, the mid-1970s). For more information, see the paper by Bercovier and Pironneau from 1979 [Bercovier1979], and for the origins of the comparable Taylor-Hood element see [Taylor73] from 1973.

3.

@@ -2406,17 +2406,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2449,21 +2449,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3540,7 +3540,7 @@
[in] support_point_values : An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

Return a block mask with as many elements as this object has blocks, in which exactly the one block that corresponds to the given argument is true. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3642,7 +3642,7 @@
scalar : An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3903,8 +3903,8 @@
component_mask : The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object so that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but whose shape function is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
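As an illustration of the distinction drawn above, the following minimal sketch (not from this page; it assumes a deal.II 9.x installation) queries the associated primitive for every degree of freedom of a $Q_2$ element in 3d:

#include <deal.II/fe/fe_q.h>

#include <iostream>

int main()
{
  // Illustrative sketch; assumes deal.II 9.x.
  // Q2 in 3d: DoFs sit on vertices, edge midpoints, face centers,
  // and the cell center.
  dealii::FE_Q<3> fe(2);

  for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
    std::cout << "DoF " << i << " is associated with an object of dimension "
              << fe.get_associated_geometry_primitive(i).get_dimension()
              << std::endl;
}

Vertex DoFs report dimension 0, edge-midpoint DoFs dimension 1, face-center DoFs dimension 2, and the cell-center DoF dimension 3.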
/usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html 2024-11-15 06:44:14.991542620 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RT__Bubbles.html 2024-11-15 06:44:14.995542655 +0000 @@ -510,7 +510,7 @@

class FE_RT_Bubbles< dim >

This class implements a curl-enhanced Raviart-Thomas element, conforming with the Hdiv space. The node functionals are defined as point values in Gauss-Lobatto points. These elements generate vector fields with normal components continuous between mesh cells. The purpose of this finite element is to localize the interactions between degrees of freedom around the nodes when an appropriate quadrature rule is used, leading to a block-diagonal mass matrix (even with a full-tensor coefficient).

    The elements are defined through enrichment of classical Raviart-Thomas elements with extra curls, so that the Hdiv conformity is preserved, and the total number of degrees of freedom of FE_RT_Bubbles of order k is equal to the number of DoFs in dim copies of FE_Q of order k.

    Note
    Unlike Raviart-Thomas, the lowest possible order for this enhanced finite element is 1, i.e. $k \ge 1$.
The matching pressure space for FE_RT_Bubbles of order k is FE_DGQ of order k-1. With exact integration, this pair yields $(k+1)$-st order of convergence in the $L_2$-norm for the vector variable and $k$-th order in the $L_2$-norm for the scalar one (the same as $BDM_k \times P_{k-1}$).

    For this enhanced Raviart-Thomas element, the node values are not cell and face moments with respect to certain polynomials, but the values in Gauss-Lobatto quadrature points. The nodal values on edges (faces in 3d) are evaluated first, according to the natural ordering of the edges (faces) of a cell. The interior degrees of freedom are evaluated last.

For an RT-Bubbles element of degree k, we choose $(k+1)^{dim-1}$ Gauss-Lobatto points on each face. These points are ordered lexicographically with respect to the orientation of the face. In the interior of the cells, the values are computed using an anisotropic Gauss-Lobatto formula for integration. The mass matrix assembled with this same quadrature rule is block-diagonal, with blocks corresponding to quadrature points. See "Higher order multipoint flux mixed finite element methods on quadrilaterals and hexahedra" for more details.
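Not from this page: a minimal sketch of the velocity-pressure pairing described above, assuming a deal.II 9.x installation:

#include <deal.II/fe/fe_dgq.h>
#include <deal.II/fe/fe_rt_bubbles.h>
#include <deal.II/fe/fe_system.h>

int main()
{
  // Illustrative sketch; assumes deal.II 9.x.
  // The lowest admissible order is k = 1; the matching pressure space
  // is FE_DGQ of order k - 1 = 0.
  const unsigned int k = 1;
  dealii::FESystem<3> fe(dealii::FE_RT_Bubbles<3>(k), 1,
                         dealii::FE_DGQ<3>(k - 1), 1);
}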

    @@ -749,11 +749,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -3348,7 +3348,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
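The workflow this describes can be sketched as follows (not from this page; assumes a deal.II 9.x installation, and the sampled function $f(x,y)=x+y$ is chosen purely for illustration):

#include <deal.II/fe/fe_q.h>
#include <deal.II/lac/vector.h>

#include <vector>

int main()
{
  // Illustrative sketch; assumes deal.II 9.x.
  dealii::FE_Q<2> fe(1);

  // One value per generalized support point; each value is a vector
  // with as many entries as the element has components (here: one).
  const auto &points = fe.get_generalized_support_points();
  std::vector<dealii::Vector<double>> support_point_values(
    points.size(), dealii::Vector<double>(fe.n_components()));

  // Sample f(x, y) = x + y at the support points.
  for (unsigned int i = 0; i < points.size(); ++i)
    support_point_values[i](0) = points[i][0] + points[i][1];

  // For Lagrange elements, the node functionals are point evaluations,
  // so the nodal values coincide with the sampled point values.
  std::vector<double> nodal_values(fe.n_dofs_per_cell());
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, nodal_values);
}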

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3450,7 +3450,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3740,8 +3740,8 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but whose shape function is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html 2024-11-15 06:44:15.127543834 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RannacherTurek.html 2024-11-15 06:44:15.127543834 +0000 @@ -730,11 +730,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -1859,17 +1859,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians are given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
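The correction is the chain rule applied to $\phi_i(\mathbf x) = \hat\phi_i(\hat{\mathbf x}(\mathbf x))$; the following short derivation is a sketch added here for clarity, not part of the original page:

\[
 \frac{d^2 \phi_i}{d x_j d x_k}
 = \underbrace{\frac{d^2 \hat\phi_i}{d \hat x_J d \hat x_K}
   (J_{jJ})^{-1} (J_{kK})^{-1}}_{D_{ijk}}
 + \frac{d \hat\phi_i}{d \hat x_J} \frac{d (J_{jJ})^{-1}}{d x_k},
\]

and since $\frac{d (J_{jJ})^{-1}}{d x_k} = -(J_{mJ})^{-1} H_{mjk}$, the second term equals $-H_{mjk} \frac{d \phi_i}{d x_m}$. Applying the same argument once more gives the third-derivative correction documented next.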

    @@ -1904,21 +1904,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivatives are given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl} - H_{mjl} \frac{d^2
 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m} - H_{mjk} \frac{d^2 \phi_i}{d x_l
 d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3394,7 +3394,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3496,7 +3496,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3786,8 +3786,8 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but whose shape function is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html 2024-11-15 06:44:15.267545084 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomas.html 2024-11-15 06:44:15.267545084 +0000 @@ -519,11 +519,11 @@

    Detailed Description

    template<int dim>
class FE_RaviartThomas< dim >

Implementation of Raviart-Thomas (RT) elements. The Raviart-Thomas space is designed to solve problems in which the solution only lives in the space $H^\text{div}=\{ {\mathbf u} \in L_2: \text{div}\, {\mathbf u} \in L_2\}$, rather than in the more commonly used space $H^1=\{ u \in L_2: \nabla u \in L_2\}$. In other words, the solution must be a vector field whose divergence is square integrable, but for which the gradient may not be square integrable. The typical application for this space (and these elements) is to the mixed formulation of the Laplace equation and related situations, see for example step-20. The defining characteristic of functions in $H^\text{div}$ is that they are in general discontinuous – but that if you draw a line in 2d (or a surface in 3d), then the normal component of the vector field must be continuous across the line (or surface) even though the tangential component may not be. As a consequence, the Raviart-Thomas element is constructed in such a way that (i) it is vector-valued, (ii) the shape functions are discontinuous, but (iii) the normal component of the vector field represented by each shape function is continuous across the faces of cells.

Other properties of the Raviart-Thomas element are that (i) it is not a primitive element; (ii) the shape functions are defined so that certain integrals over the faces are either zero or one, rather than the common case of certain point values being either zero or one. (There is, however, the FE_RaviartThomasNodal element that uses point values.)

    We follow the commonly used – though confusing – definition of the "degree" of RT elements. Specifically, the "degree" of the element denotes the polynomial degree of the largest complete polynomial subspace contained in the finite element space, even if the space may contain shape functions of higher polynomial degree. The lowest order element is consequently FE_RaviartThomas(0), i.e., the Raviart-Thomas element "of degree zero", even though the functions of this space are in general polynomials of degree one in each variable. This choice of "degree" implies that the approximation order of the function itself is degree+1, as with usual polynomial spaces. The numbering so chosen implies the sequence

\[
  Q_{k+1}
  \stackrel{\text{grad}}{\rightarrow}
  \text{Nedelec}_k
  \stackrel{\text{curl}}{\rightarrow}
  \text{RaviartThomas}_k
  \stackrel{\text{div}}{\rightarrow}
  DGQ_{k}
\]

    This class is not implemented for the codimension one case (spacedim != dim).
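To illustrate the naming convention, a minimal sketch (not from this page; assumes a deal.II 9.x installation):

#include <deal.II/fe/fe_raviart_thomas.h>

#include <iostream>

int main()
{
  // Illustrative sketch; assumes deal.II 9.x.
  // The "degree zero" element is the lowest order member of the family,
  // even though its shape functions are polynomials of degree one in
  // each variable.
  dealii::FE_RaviartThomas<2> rt0(0);
  std::cout << rt0.get_name() << ": " << rt0.n_dofs_per_cell()
            << " DoFs per cell, " << rt0.n_components()
            << " vector components" << std::endl;
}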

    Interpolation

    @@ -798,11 +798,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -3463,7 +3463,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3565,7 +3565,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3826,8 +3826,8 @@
component_mask: The mask that selects individual components of the finite element
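The success and failure modes described in the notes above can be seen concretely in the following sketch (not from this page; assumes a deal.II 9.x installation; the mixed element layout is illustrative):

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/component_mask.h>
#include <deal.II/fe/fe_dgq.h>
#include <deal.II/fe/fe_raviart_thomas.h>
#include <deal.II/fe/fe_system.h>

#include <vector>

int main()
{
  // Illustrative sketch; assumes deal.II 9.x.
  // Mixed element: the RT element is a single block with dim components;
  // the DGQ pressure forms its own block.
  dealii::FESystem<2> fe(dealii::FE_RaviartThomas<2>(0), 1,
                         dealii::FE_DGQ<2>(0), 1);

  // Selecting both velocity components covers the RT block completely,
  // so the conversion succeeds:
  const dealii::ComponentMask velocities(
    std::vector<bool>{true, true, false});
  const dealii::BlockMask velocity_block = fe.block_mask(velocities);

  // Selecting only the x-velocity covers just part of the RT block, so
  // the following would abort with an exception:
  // fe.block_mask(
  //   dealii::ComponentMask(std::vector<bool>{true, false, false}));
}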

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom is associated with an interpolation polynomial that has its support point physically located on a line bounding a cell, but whose shape function is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html 2024-11-15 06:44:15.403546299 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__RaviartThomasNodal.html 2024-11-15 06:44:15.403546299 +0000 @@ -811,11 +811,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -3482,7 +3482,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3584,7 +3584,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3874,8 +3874,8 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these points of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
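    A small sketch (ours, assuming the deal.II 9.x API) that queries this classification for every degree of freedom of a $Q_2$ element in 3d, matching the examples above:

#include <deal.II/base/geometry_info.h>
#include <deal.II/fe/fe_q.h>

#include <iostream>

int main()
{
  dealii::FE_Q<3> fe(2); // Q_2 in 3d: DoFs on vertices, edges, faces, cell

  for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
    {
      const dealii::GeometryPrimitive primitive =
        fe.get_associated_geometry_primitive(i);
      // dimension 0 = vertex, 1 = line, 2 = quad, 3 = hex
      std::cout << "dof " << i << " lives on a " << primitive.get_dimension()
                << "-dimensional object\n";
    }
}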

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html 2024-11-15 06:44:15.531547443 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexDGP.html 2024-11-15 06:44:15.531547443 +0000 @@ -490,7 +490,7 @@

    Detailed Description

    template<int dim, int spacedim = dim>
    class FE_SimplexDGP< dim, spacedim >

    Implementation of a scalar discontinuous Lagrange finite element $P_k$, sometimes denoted as $P_{-k}$, that yields the finite element space of discontinuous, piecewise polynomials of degree $k$.

    Also see Simplex support.

    Definition at line 188 of file fe_simplex_p.h.
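    A one-line sketch (ours) of the element just described; for $P_k$ on a triangle the DoF count is $(k+1)(k+2)/2$ whether or not the space is continuous across faces:

#include <deal.II/fe/fe_simplex_p.h>

#include <cassert>

int main()
{
  dealii::FE_SimplexDGP<2> fe(2); // discontinuous quadratic P_2 on triangles
  assert(fe.n_dofs_per_cell() == 6); // (2+1)(2+2)/2 = 6 nodal values
  assert(fe.n_components() == 1);    // a scalar element
}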

    @@ -1050,11 +1050,11 @@

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients, in terms of the shape functions, of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$; in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.
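    The following sketch (ours, not from the documentation above; it assumes the deal.II 9.x API and the scalar case where each node functional is a plain point evaluation) feeds the values $\{\varphi(\hat{\mathbf x}_i)\}$ of $\varphi(\mathbf x)=x_0$ through this function:

#include <deal.II/base/point.h>
#include <deal.II/fe/fe_simplex_p.h>
#include <deal.II/lac/vector.h>

#include <vector>

int main()
{
  dealii::FE_SimplexDGP<2> fe(2);

  const std::vector<dealii::Point<2>> &points =
    fe.get_generalized_support_points();

  // One Vector per support point, with one entry per vector component
  // (a single entry here, since the element is scalar).
  std::vector<dealii::Vector<double>> support_point_values(
    points.size(), dealii::Vector<double>(fe.n_components()));
  for (unsigned int i = 0; i < points.size(); ++i)
    support_point_values[i][0] = points[i][0]; // varphi(x) = x_0

  // For a Lagrange element the nodal values are just the point values
  // again, i.e. the expansion coefficients of the interpolant of varphi.
  std::vector<double> nodal_values(fe.n_dofs_per_cell());
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, nodal_values);
}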

    Parameters
    @@ -2071,17 +2071,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
\]

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.
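    The correction term can be motivated in one line (a derivation sketch of ours, assuming the standard pull-back $\phi_i(\mathbf x) = \hat\phi_i(\hat{\mathbf x}(\mathbf x))$): differentiating the first-derivative transformation once more gives

\[
 \frac{d \phi_i}{d x_j} = \frac{d \hat\phi_i}{d \hat x_J} (J_{jJ})^{-1}
 \quad\Longrightarrow\quad
 \frac{d^2 \phi_i}{d x_j d x_k}
 = \underbrace{\frac{d^2 \hat\phi_i}{d \hat x_J d \hat x_K}
   (J_{jJ})^{-1} (J_{kK})^{-1}}_{D_{ijk}}
 + \frac{d \hat\phi_i}{d \hat x_J} \frac{d (J_{jJ})^{-1}}{d x_k},
\]

    and expanding the derivative of the inverse Jacobian via $dA^{-1} = -A^{-1}(dA)A^{-1}$ turns the trailing term into exactly $-H_{mjk} \frac{d \phi_i}{d x_m}$.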

    @@ -2114,21 +2114,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3344,7 +3344,7 @@
    [in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3446,7 +3446,7 @@
    scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3707,8 +3707,8 @@
    component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html 2024-11-15 06:44:15.659548586 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP.html 2024-11-15 06:44:15.659548586 +0000 @@ -490,7 +490,7 @@

    Detailed Description

    template<int dim, int spacedim = dim>
    class FE_SimplexP< dim, spacedim >

    Implementation of a scalar Lagrange finite element $P_k$ that yields the finite element space of continuous, piecewise polynomials of degree $k$. The corresponding element on hypercube cells is FE_Q, on wedges it is FE_WedgeP, and on pyramids it is FE_PyramidP.

    Also see Simplex support.

    Definition at line 132 of file fe_simplex_p.h.
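    A short sketch (ours, assuming the deal.II 9.x API) of the element in use; simplex elements need a simplex mesh and, for assembly, a compatible mapping such as MappingFE:

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_simplex_p.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>

#include <iostream>

int main()
{
  dealii::Triangulation<2> tria;
  // A 4 x 4 grid of cells, each split into simplices.
  dealii::GridGenerator::subdivided_hyper_cube_with_simplices(tria, 4);

  dealii::FE_SimplexP<2> fe(2); // continuous quadratic P_2
  dealii::DoFHandler<2> dof_handler(tria);
  dof_handler.distribute_dofs(fe);

  std::cout << "Active cells: " << tria.n_active_cells()
            << ", DoFs: " << dof_handler.n_dofs() << '\n';
}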

    @@ -1050,11 +1050,11 @@

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients, in terms of the shape functions, of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$; in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -2071,17 +2071,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
\]

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2114,21 +2114,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3344,7 +3344,7 @@
    [in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3446,7 +3446,7 @@
    scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3707,8 +3707,8 @@
    component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html 2024-11-15 06:44:15.787549729 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexP__Bubbles.html 2024-11-15 06:44:15.791549765 +0000 @@ -957,11 +957,11 @@

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients, in terms of the shape functions, of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$; in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -1978,17 +1978,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
\]

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -2021,21 +2021,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3336,7 +3336,7 @@
    [in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3438,7 +3438,7 @@
    scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3699,8 +3699,8 @@
    component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

    For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

    To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

    On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html 2024-11-15 06:44:15.919550908 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__SimplexPoly.html 2024-11-15 06:44:15.923550944 +0000 @@ -916,11 +916,11 @@

    Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients, in terms of the shape functions, of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

    In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$; in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

    The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

    Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -1943,17 +1943,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
\]

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

    where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -1986,21 +1986,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

    where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

    where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3359,7 +3359,7 @@
    [in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

    Note
    This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3461,7 +3461,7 @@
    scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

    Note
    This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3722,8 +3722,8 @@
    component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell is nonzero only on that one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
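A small sketch of querying this association; it assumes the member being documented here is FiniteElement::get_associated_geometry_primitive(), with FE_Q<3>(2) as the example element:

#include <deal.II/base/geometry_info.h>
#include <deal.II/fe/fe_q.h>
#include <iostream>

int main()
{
  // Q2 in 3d: dofs sit on vertices, edge midpoints, face centers,
  // and the cell center.
  dealii::FE_Q<3> fe(2);

  for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
    std::cout << "dof " << i << " is associated with a "
              << fe.get_associated_geometry_primitive(i).get_dimension()
              << "-dimensional object\n";
}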

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html 2024-11-15 06:44:16.047552051 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ.html 2024-11-15 06:44:16.047552051 +0000 @@ -3274,7 +3274,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3376,7 +3376,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3637,8 +3637,8 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell is nonzero only on that one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html 2024-11-15 06:44:16.175553195 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__TraceQ_3_011_00_01spacedim_01_4.html 2024-11-15 06:44:16.175553195 +0000 @@ -3449,7 +3449,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3551,7 +3551,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3808,8 +3808,8 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell is nonzero only on that one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3843,11 +3843,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.
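A minimal sketch of the workflow this describes, assuming an FE_RaviartThomas element on the reference cell; the constant field f is a stand-in for an arbitrary function to interpolate:

#include <deal.II/base/function.h>
#include <deal.II/base/point.h>
#include <deal.II/fe/fe_raviart_thomas.h>
#include <deal.II/lac/vector.h>

#include <vector>

int main()
{
  constexpr int dim = 2;

  // Element whose node functionals are face-normal moments.
  dealii::FE_RaviartThomas<dim> fe(1);

  // Hypothetical function to interpolate: the constant field (1,1).
  dealii::Functions::ConstantFunction<dim> f(1.0, dim);

  // Evaluate f at the generalized support points ...
  const std::vector<dealii::Point<dim>> &points =
    fe.get_generalized_support_points();
  std::vector<dealii::Vector<double>> support_point_values(
    points.size(), dealii::Vector<double>(fe.n_components()));
  for (unsigned int q = 0; q < points.size(); ++q)
    f.vector_value(points[q], support_point_values[q]);

  // ... and apply the node functionals to obtain the dof values
  // Psi_i[f] of the interpolant f_h on the reference cell.
  std::vector<double> dof_values(fe.n_dofs_per_cell());
  fe.convert_generalized_support_point_values_to_dof_values(
    support_point_values, dof_values);
}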

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html 2024-11-15 06:44:16.307554373 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeDGP.html 2024-11-15 06:44:16.311554409 +0000 @@ -714,11 +714,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
    @@ -1735,17 +1735,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
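For context, the subtracted term follows from the chain rule; a short derivation sketch in the same notation:

\[
\frac{d^2 \phi_i}{d x_j d x_k}
= \frac{d^2 \phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1}
+ \frac{d \phi_i}{d \hat x_J} \frac{d^2 \hat x_J}{d x_j d x_k}
= D_{ijk} + \frac{d \phi_i}{d x_m} J_{mJ} \frac{d^2 \hat x_J}{d x_j d x_k},
\]

and differentiating the identity $J_{mJ} (J_{jJ})^{-1} = \delta_{mj}$ with respect to $x_k$ gives $J_{mJ} \frac{d^2 \hat x_J}{d x_j d x_k} = -H_{mjk}$, i.e. exactly the subtracted term. The three $H$-terms and the $K$-term of the third-derivative correction below arise the same way by differentiating once more.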

    @@ -1778,21 +1778,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
- H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
- H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3268,7 +3268,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3370,7 +3370,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3660,8 +3660,8 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell is nonzero only on that one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html 2024-11-15 06:44:16.443555588 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgeP.html 2024-11-15 06:44:16.443555588 +0000 @@ -851,11 +851,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -1872,17 +1872,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

    Before the correction, the Hessians would be given by

\[
D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1} (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians would be given by

\[
\frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.

    @@ -1915,21 +1915,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

    Before the correction, the third derivatives would be given by

\[
D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1} (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivative would be given by

\[
\frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
- H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
- H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
- H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
- K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3292,7 +3292,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3394,7 +3394,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -3684,8 +3684,8 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom associated with an interpolation polynomial that has its support point physically located at a line bounding a cell is nonzero only on that one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html 2024-11-15 06:44:16.575556767 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFE__WedgePoly.html 2024-11-15 06:44:16.575556767 +0000 @@ -658,11 +658,11 @@

Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function then computes what the nodal values of the element are, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the return value of the FiniteElement::system_to_component_index() function. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    @@ -1685,17 +1685,17 @@

    Correct the shape Hessians by subtracting the terms corresponding to the Jacobian pushed forward gradient.

Before the correction, the Hessians would be given by

\[
 D_{ijk} = \frac{d^2\phi_i}{d \hat x_J d \hat x_K} (J_{jJ})^{-1}
 (J_{kK})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct Hessians are given by

\[
 \frac{d^2 \phi_i}{d x_j d x_k} = D_{ijk} - H_{mjk} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative.
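For intuition (a derivation sketch added here, not part of the original text): the subtracted term appears because on non-affine cells the inverse Jacobian itself depends on $\mathbf x$, so differentiating the mapped gradient with the product rule gives

\begin{align*}
\frac{d^2 \phi_i}{d x_j d x_k}
 &= \frac{d}{d x_k}\left(\frac{d\hat\phi_i}{d \hat x_J}(J_{jJ})^{-1}\right)
  = \underbrace{\frac{d^2\hat\phi_i}{d \hat x_J d \hat x_K}
    (J_{jJ})^{-1}(J_{kK})^{-1}}_{D_{ijk}}
  + \frac{d\hat\phi_i}{d \hat x_J}\frac{d (J_{jJ})^{-1}}{d x_k},
\end{align*}

and the last term equals $-H_{mjk}\frac{d \phi_i}{d x_m}$ by the identity $d(J^{-1}) = -J^{-1}\,(dJ)\,J^{-1}$.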

    @@ -1728,21 +1728,21 @@

    Correct the shape third derivatives by subtracting the terms corresponding to the Jacobian pushed forward gradient and second derivative.

Before the correction, the third derivatives would be given by

\[
 D_{ijkl} = \frac{d^3\phi_i}{d \hat x_J d \hat x_K d \hat x_L} (J_{jJ})^{-1}
 (J_{kK})^{-1} (J_{lL})^{-1},
\]

where $J_{iI}=\frac{d x_i}{d \hat x_I}$. After the correction, the correct third derivatives are given by

\[
 \frac{d^3\phi_i}{d x_j d x_k d x_l} = D_{ijkl}
 - H_{mjl} \frac{d^2 \phi_i}{d x_k d x_m}
 - H_{mkl} \frac{d^2 \phi_i}{d x_j d x_m}
 - H_{mjk} \frac{d^2 \phi_i}{d x_l d x_m}
 - K_{mjkl} \frac{d \phi_i}{d x_m},
\]

where $H_{ijk}$ is the Jacobian pushed-forward derivative and $K_{ijkl}$ is the Jacobian pushed-forward second derivative.

    @@ -3276,7 +3276,7 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
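As a usage sketch (my own; it assumes a vector-valued element whose pressure is component dim, as in a Stokes system):

FEValuesExtractors::Scalar pressure(dim);       // assumed component index
const BlockMask pressure_blocks = fe.block_mask(pressure);
// exactly one entry of pressure_blocks is true, namely the pressure block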
    Parameters
    @@ -3378,7 +3378,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
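A hedged sketch of the conversion (assuming the first dim components of the element form the velocity block):

FEValuesExtractors::Vector velocities(0);
const ComponentMask velocity_components = fe.component_mask(velocities);
const BlockMask     velocity_blocks     = fe.block_mask(velocity_components);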
    Parameters
    @@ -3668,8 +3668,8 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).
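To make the classification concrete, a small sketch (my own, assuming a FiniteElement object fe) that tallies the local degrees of freedom by the dimension of the associated object:

unsigned int counts[4] = {0, 0, 0, 0}; // vertex, line, quad, hex
for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
  ++counts[fe.get_associated_geometry_primitive(i).get_dimension()];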

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html 2024-11-15 06:44:16.715558018 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElement.html 2024-11-15 06:44:16.719558053 +0000 @@ -500,8 +500,8 @@
    template<int dim, int spacedim = dim>
    class FiniteElement< dim, spacedim >

    This is the base class for finite elements in arbitrary dimensions. It declares the interface both in terms of member variables and public member functions through which properties of a concrete implementation of a finite element can be accessed. This interface generally consists of a number of groups of variables and functions that can roughly be delineated as follows:

• Basic information about the finite element, such as the number of degrees of freedom per vertex, edge, or cell. This kind of data is stored in the FiniteElementData base class. (Though the FiniteElement::get_name() member function also falls into this category.)
• A description of the shape functions and their derivatives on the reference cell $[0,1]^d$, if an element is indeed defined by mapping shape functions from the reference cell to an actual cell.
• Matrices (and functions that access them) that describe how an element's shape functions relate to those on parent or child cells (restriction or prolongation) or neighboring cells (for hanging node constraints), as well as to other finite element spaces defined on the same cell (e.g., when doing $p$ refinement).
• Functions that describe the properties of individual shape functions, for example which vector components of a vector-valued finite element's shape function are nonzero, or whether an element is primitive.
• For elements that are interpolatory, such as the common $Q_p$ Lagrange elements, data that describes where their support points are located.
• Functions that define the interface to the FEValues class that is almost always used to access finite element shape functions from user code.
@@ -586,7 +586,7 @@
    21 1 0 8 1

What we see is the following: there are a total of 22 degrees of freedom on this element with components ranging from 0 to 2. Each DoF corresponds to one of the two base elements used to build FESystem: $\mathbb Q_2$ or $\mathbb Q_1$. Since FE_Q are primitive elements, we have a total of 9 distinct scalar-valued shape functions for the quadratic element and 4 for the linear element. Finally, for DoFs corresponding to the first base element the multiplicity is either zero or one, meaning that we use the same scalar-valued $\mathbb Q_2$ element for both the $x$ and $y$ components of the velocity field $\mathbb Q_2 \otimes \mathbb Q_2$. For DoFs corresponding to the second base element the multiplicity is zero.
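For reference, a sketch (my own) of how an element with exactly this structure could be built:

FESystem<2> fe(FE_Q<2>(2), 2,   // velocity: Q2 x Q2, 2 x 9 = 18 DoFs
               FE_Q<2>(1), 1);  // pressure: Q1, 4 DoFs; 22 in total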

    Support points

    Finite elements are frequently defined by defining a polynomial space and a set of dual functionals. If these functionals involve point evaluations, then the element is "interpolatory" and it is possible to interpolate an arbitrary (but sufficiently smooth) function onto the finite element space by evaluating it at these points. We call these points "support points".

    Most finite elements are defined by mapping from the reference cell to a concrete cell. Consequently, the support points are then defined on the reference ("unit") cell, see this glossary entry. The support points on a concrete cell can then be computed by mapping the unit support points, using the Mapping class interface and derived classes, typically via the FEValues class.
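A hedged sketch of this mapping step (names are placeholders; cell is a triangulation cell iterator and mapping a Mapping<dim> object):

std::vector<Point<dim>> real_support_points;
for (const Point<dim> &p_unit : fe.get_unit_support_points())
  real_support_points.push_back(
    mapping.transform_unit_to_real_cell(cell, p_unit));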

    @@ -618,7 +618,7 @@

    Through this construction, the degrees of freedom on the child faces are constrained to the degrees of freedom on the parent face. The information so provided is typically consumed by the DoFTools::make_hanging_node_constraints() function.

    Note
    The hanging node constraints described by these matrices are only relevant to the case where the same finite element space is used on neighboring (but differently refined) cells. The case that the finite element spaces on different sides of a face are different, i.e., the $hp$ case (see hp-finite element support) is handled by separate functions. See the FiniteElement::get_face_interpolation_matrix() and FiniteElement::get_subface_interpolation_matrix() functions.

    Interpolation matrices in three dimensions

For the interface constraints, the 3d case is similar to the 2d case. The numbering for the indices $n$ on the mother face is obvious and keeps to the usual numbering of degrees of freedom on quadrilaterals.

The numbering of the degrees of freedom on the interior of the refined faces for the index $m$ is as follows: let $d_v$ and $d_l$ be as above, and $d_q$ be the number of degrees of freedom per quadrilateral (and therefore per face), then $m=0...d_v-1$ denote the dofs on the vertex at the center, $m=d_v...5d_v-1$ for the dofs on the vertices at the center of the bounding lines of the quadrilateral, $m=5d_v...5d_v+4*d_l-1$ are for the degrees of freedom on the four lines connecting the center vertex to the outer boundary of the mother face, $m=5d_v+4*d_l...5d_v+12*d_l-1$ for the degrees of freedom on the small lines surrounding the quad, and $m=5d_v+12*d_l...5d_v+12*d_l+4*d_q-1$ for the dofs on the four child faces. Note the direction of the lines at the boundary of the quads, as shown below.

    The order of the twelve lines and the four child faces can be extracted from the following sketch, where the overall order of the different dof groups is depicted:

    *    *--15--4--16--*
     *    |      |      |
    @@ -660,7 +660,7 @@
     
• Compute the basis $v_j$ of the finite element shape function space by applying $M^{-1}$ to the basis $w_j$.

The matrix M may be computed with FETools::compute_node_matrix(). This function relies on the existence of generalized_support_points and FiniteElement::convert_generalized_support_point_values_to_dof_values() (see the glossary entry on generalized support points for more information). With this, one can then use the following piece of code in the constructor of a class derived from FiniteElement to compute the $M$ matrix:

FullMatrix<double> M = FETools::compute_node_matrix(*this);
this->inverse_node_matrix.reinit(this->n_dofs_per_cell(),
                                 this->n_dofs_per_cell());
this->inverse_node_matrix.invert(M);
@@ -697,7 +697,7 @@

\[
 R_1 = \left(\begin{matrix}0 & 0 \\ 0 & 1\end{matrix}\right).
\]

However, this approach already fails if we go to a $Q_2$ element with the following degrees of freedom:

meshes: *-------* *----*----*
    local DoF numbers: 0 2 1 0 2 1|0 2 1
    global DoF numbers: 0 2 1 0 2 1 4 3

    Writing things as the sum over matrix operations as above would not easily work because we have to add nonzero values to $U^\text{coarse}_2$ twice, once for each child.

    @@ -2629,7 +2629,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -2713,7 +2713,7 @@
scalar: An object that represents a single scalar vector component of this finite element.

    Given a component mask (see this glossary entry), produce a block mask (see this glossary entry) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    Parameters
    @@ -2967,8 +2967,8 @@
component_mask: The mask that selects individual components of the finite element

    For a given degree of freedom, return whether it is logically associated with a vertex, line, quad or hex.

For instance, for continuous finite elements this coincides with the lowest-dimensional object the support point of the degree of freedom lies on. To give an example, for $Q_1$ elements in 3d, every degree of freedom is defined by a shape function that we get by interpolating using support points that lie on the vertices of the cell. The support of these shape functions of course extends to all edges connected to this vertex, as well as the adjacent faces and the cell interior, but we say that logically the degree of freedom is associated with the vertex as this is the lowest-dimensional object it is associated with. Likewise, for $Q_2$ elements in 3d, the degrees of freedom with support points at edge midpoints would yield a value of GeometryPrimitive::line from this function, whereas those on the centers of faces in 3d would return GeometryPrimitive::quad.

To make this more formal, the kind of object returned by this function represents the object such that the support of the shape function corresponding to the degree of freedom (i.e., that part of the domain where the function "lives") is the union of all of the cells sharing this object. To return to the example above, for $Q_2$ in 3d, the shape function with support point at an edge midpoint has support on all cells that share the edge and not only the cells that share the adjacent faces, and consequently the function will return GeometryPrimitive::line.

On the other hand, for discontinuous elements of type $DGQ_2$, a degree of freedom may be associated with an interpolation polynomial whose support point is physically located on a line bounding a cell, but which is nonzero only on one cell. Consequently, it is logically associated with the interior of that cell (i.e., with a GeometryPrimitive::quad in 2d and a GeometryPrimitive::hex in 3d).

    Parameters
    @@ -3004,11 +3004,11 @@
Given the values of a function $f(\mathbf x)$ at the (generalized) support points of the reference cell, this function computes the nodal values of the element, i.e., $\Psi_i[f]$, where $\Psi_i$ are the node functionals of the element (see also Node values or node functionals). The values $\Psi_i[f]$ are then the expansion coefficients for the shape functions of the finite element function that interpolates the given function $f(\mathbf x)$, i.e., $f_h(\mathbf x) = \sum_i \Psi_i[f] \varphi_i(\mathbf x)$ is the finite element interpolant of $f$ with the current element. The operation described here is used, for example, in the FETools::compute_node_matrix() function.

In more detail, let us assume that the generalized support points (see this glossary entry) of the current element are $\hat{\mathbf x}_i$ and that the node functionals associated with the current element are $\Psi_i[\cdot]$. Then, the fact that the element is based on generalized support points implies that if we apply $\Psi_i$ to a (possibly vector-valued) finite element function $\varphi$, the result must have the form $\Psi_i[\varphi] = f_i(\varphi(\hat{\mathbf x}_i))$ – in other words, the value of the node functional $\Psi_i$ applied to $\varphi$ only depends on the values of $\varphi$ at $\hat{\mathbf x}_i$ and not on values anywhere else, or integrals of $\varphi$, or any other kind of information.

The exact form of $f_i$ depends on the element. For example, for scalar Lagrange elements, we have that in fact $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)$. If you combine multiple scalar Lagrange elements via an FESystem object, then $\Psi_i[\varphi] = \varphi(\hat{\mathbf x}_i)_{c(i)}$ where $c(i)$ is the first component of the FiniteElement::system_to_component_index() function's return value. In these two cases, $f_i$ is therefore simply the identity (in the scalar case) or a function that selects a particular vector component of its argument. On the other hand, for Raviart-Thomas elements, one would have that $f_i(\mathbf y) = \mathbf y \cdot \mathbf n_i$ where $\mathbf n_i$ is the normal vector of the face at which the shape function is defined.

Given all of this, what this function does is the following: If you input a list of values of a function $\varphi$ at all generalized support points (where each value is in fact a vector of values with as many components as the element has), then this function returns a vector of values obtained by applying the node functionals to these values. In other words, if you pass in $\{\varphi(\hat{\mathbf x}_i)\}_{i=0}^{N-1}$ then you will get out a vector $\{\Psi_i[\varphi]\}_{i=0}^{N-1}$ where $N$ equals dofs_per_cell.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElementData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElementData.html 2024-11-15 06:44:16.759558410 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFiniteElementData.html 2024-11-15 06:44:16.763558446 +0000 @@ -332,8 +332,8 @@
[in] support_point_values: An array of size dofs_per_cell (which equals the number of points the get_generalized_support_points() function will return) where each element is a vector with as many entries as the element has vector components. This array should contain the values of a function at the generalized support points of the current element.
[in] dofs_per_object: A vector that describes the number of degrees of freedom on geometrical objects for each dimension. This vector must have size dim+1, and entry 0 describes the number of degrees of freedom per vertex, entry 1 the number of degrees of freedom per line, etc. As an example, for the common $Q_1$ Lagrange element in 2d, this vector would have elements (1,0,0). On the other hand, for a $Q_3$ element in 3d, it would have entries (1,2,4,8).
[in] n_components: Number of vector components of the element.
[in] degree: The maximal polynomial degree of any of the shape functions of this element in any variable on the reference element. For example, for the $Q_1$ element (in any space dimension), this would be one; this is so despite the fact that the element has a shape function of the form $\hat x\hat y$ (in 2d) and $\hat x\hat y\hat z$ (in 3d), which, although quadratic and cubic polynomials, are still only linear in each reference variable separately. The information provided by this variable is typically used in determining what an appropriate quadrature formula is.
[in] conformity: A variable describing which Sobolev space this element conforms to. For example, the $Q_p$ Lagrange elements (implemented by the FE_Q class) are $H^1$ conforming, whereas the Raviart-Thomas element (implemented by the FE_RaviartThomas class) is $H_\text{div}$ conforming; finally, completely discontinuous elements (implemented by the FE_DGQ class) are only $L_2$ conforming.
[in] block_indices: An argument that describes how the base elements of a finite element are grouped. The default value constructs a single block that consists of all dofs_per_cell degrees of freedom. This is appropriate for all "atomic" elements (including non-primitive ones) and these can therefore omit this argument. On the other hand, composed elements such as FESystem will want to pass a different value here.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html 2024-11-15 06:44:16.791558696 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFlatManifold.html 2024-11-15 06:44:16.791558696 +0000 @@ -473,7 +473,7 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

Note
You can use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold by overloading the project_to_manifold() function.
    Parameters
    @@ -482,7 +482,7 @@
Returns
A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

    Reimplemented from Manifold< dim, dim >.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html 2024-11-15 06:44:16.843559161 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFullMatrix.html 2024-11-15 06:44:16.843559161 +0000 @@ -1096,8 +1096,8 @@
Return the $l_1$-norm of the matrix, where $||M||_1 = \max_j \sum_i |M_{ij}|$ (maximum of the sums over columns).

    @@ -1117,8 +1117,8 @@
Return the $l_\infty$-norm of the matrix, where $||M||_\infty = \max_i \sum_j |M_{ij}|$ (maximum of the sums over rows).
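A quick numerical illustration (my own sketch) of the two norms:

FullMatrix<double> M(2, 2);
M(0, 0) = 1.;  M(0, 1) = -2.;
M(1, 0) = 3.;  M(1, 1) = 4.;
const double l1   = M.l1_norm();     // max column sum: |-2| + |4| = 6
const double linf = M.linfty_norm(); // max row sum:    |3| + |4| = 7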

    @@ -2071,7 +2071,7 @@

    A=Inverse(A). A must be a square matrix. Inversion of this matrix by Gauss-Jordan algorithm with partial pivoting. This process is well-behaved for positive definite matrices, but be aware of round-off errors in the indefinite case.

In case deal.II was configured with LAPACK, the functions Xgetrf and Xgetri build an LU factorization and invert the matrix upon that factorization, providing best performance up to matrices with a few hundred rows and columns.

The numerical effort to invert an $n \times n$ matrix is of the order $n^3$.
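A minimal usage sketch (my own; it assumes A has been filled with a non-singular square matrix):

FullMatrix<double> A(4, 4);
// ... fill A ...
A.gauss_jordan(); // A now holds its own inverse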

    @@ -2115,7 +2115,7 @@
Assign the Cholesky decomposition $A=:L L^T$ of the given matrix $A$ to *this, where $L$ is a lower triangular matrix. The given matrix must be symmetric positive definite.

    ExcMatrixNotPositiveDefinite will be thrown in the case that the matrix is not positive definite.

@@ -2139,7 +2139,7 @@

*this(i,j) = $V(i) W(j)$ where $V,W$ are vectors of the same length.
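A usage sketch (my own) of this outer-product operation:

Vector<double> V(3), W(3);
// ... fill V and W ...
FullMatrix<double> M(3, 3);
M.outer_product(V, W); // M(i,j) = V(i) * W(j)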

/usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html 2024-11-15 06:44:16.883559518 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunction.html 2024-11-15 06:44:16.887559553 +0000 @@ -261,7 +261,7 @@

${\mathbb C}^{n_\text{components}}$. In such cases, you can choose a value different than the default double for the second template argument of this class: it describes the scalar type to be used for each component of your return values. It defaults to double, but in the example above, it could be set to std::complex<double>. step-58 is an example of this.

    Template Parameters
dim: The space dimension of the range space within which the domain $\Omega$ of the function lies. Consequently, the function will be evaluated at objects of type Point<dim>.
RangeNumberType: The scalar type of the vector space that is the range (or image) of this function. As discussed above, objects of the current type represent functions from ${\mathbb R}^\text{dim}$ to $S^{n_\text{components}}$ where $S$ is the underlying scalar type of the vector space. The type of $S$ is given by the RangeNumberType template argument.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html 2024-11-15 06:44:16.927559911 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctionManifold.html 2024-11-15 06:44:16.927559911 +0000 @@ -553,7 +553,7 @@
Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the sub_manifold coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

This function is used in the computations required by the get_tangent_vector() function. The default implementation calls the get_gradient() method of the FunctionManifold::push_forward_function() member class. If you construct this object using the constructor that takes two string expressions, then the default implementation of this method uses a finite difference scheme to compute the gradients (see the AutoDerivativeFunction() class for details), and you can specify the size of the spatial step size at construction time with the h parameter.

    Refer to the general documentation of this class for more information.

    @@ -720,24 +720,24 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
  \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
 \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                            -F^{-1}(\mathbf x_1)\right]
\end{align*}

In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
  \mathbf s(t) &= F(\zeta(t))
 \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
 \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                    -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
  \mathbf s'(0) &=
    \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                       + t\left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right]\right)
    \right|_{t=0}
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
\end{align*}

This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html 2024-11-15 06:44:16.963560232 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1CoordinateRestriction.html 2024-11-15 06:44:16.963560232 +0000 @@ -228,7 +228,7 @@
x1: The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.

    Detailed Description

    template<int dim>
class Functions::CoordinateRestriction< dim >

This class takes a function in dim + 1 dimensions and creates a new function in one dimension lower by restricting one of the coordinates to a given value. Mathematically this corresponds to taking a function $f = f(x, y, z)$, a fixed value, $Z$, and defining a new function (the restriction) $g = g(x, y) = f(x, y, Z)$. Using this class, this translates to

    double z = ...
    unsigned int restricted_direction = 2;
    @@ -236,7 +236,7 @@
The dim-dimensional coordinates on the restriction are ordered starting from the restricted (dim + 1)-coordinate. In particular, this means that if the $y$-coordinate is locked to $Y$ in 3d, the coordinates are ordered as $(z, x)$ on the restriction: $g = g(z, x) = f(x, Y, z)$. This is the same convention as in BoundingBox::cross_section.
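A hypothetical completion of the snippet shown above (the assumed argument order is the underlying function, the restricted direction, and the fixed coordinate value; f is a Function<3>):

double z = 2.;                         // the fixed value Z
unsigned int restricted_direction = 2; // lock the z-coordinate
Functions::CoordinateRestriction<2> g(f, restricted_direction, z);
// g is now the 2d restriction g(x, y) = f(x, y, Z)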

    Definition at line 50 of file function_restriction.h.

    Member Typedef Documentation

/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html 2024-11-15 06:44:17.003560590 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedTensorProductGridData.html 2024-11-15 06:44:17.003560590 +0000 @@ -243,7 +243,7 @@

Evaluating the function at a point $\mathbf x=(x,y,z)$ will find the box so that $x_k\le x\le x_{k+1}, y_l\le y\le y_{l+1}, z_m\le z\le z_{m+1}$, and do a trilinear interpolation of the data on this cell. Similar operations are done in lower dimensions.

    This class is most often used for either evaluating coefficients or right hand sides that are provided experimentally at a number of points inside the domain, or for comparing outputs of a solution on a finite element mesh against previously obtained data defined on a grid.

Note
If the points $x_i$ are actually equally spaced on an interval $[x_0,x_1]$ and the same is true for the other data points in higher dimensions, you should use the InterpolatedUniformGridData class instead.

    If a point is requested outside the box defined by the end points of the coordinate arrays, then the function is assumed to simply extend by constant values beyond the last data point in each coordinate direction. (The class does not throw an error if a point lies outside the box since it frequently happens that a point lies just outside the box by an amount on the order of numerical roundoff.)

    Note
    The use of the related class InterpolatedUniformGridData is discussed in step-53.

    Dealing with large data sets

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html 2024-11-15 06:44:17.043560947 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1InterpolatedUniformGridData.html 2024-11-15 06:44:17.047560982 +0000 @@ -235,7 +235,7 @@ class Functions::InterpolatedUniformGridData< dim >

    A scalar function that computes its values by (bi-, tri-)linear interpolation from a set of point data that are arranged on a uniformly spaced tensor product mesh. In other words, considering the three-dimensional case, let there be points $x_0,\ldots, x_{K-1}$ that result from a uniform subdivision of the interval $[x_0,x_{K-1}]$ into $K-1$ sub-intervals of size $\Delta x = (x_{K-1}-x_0)/(K-1)$, and similarly $y_0,\ldots,y_{L-1}$, $z_1,\ldots,z_{M-1}$. Also consider data $d_{klm}$ defined at point $(x_k,y_l,z_m)^T$, then evaluating the function at a point $\mathbf x=(x,y,z)$ will find the box so that $x_k\le x\le x_{k+1},
 y_l\le y\le y_{l+1}, z_m\le z\le z_{m+1}$, and do a trilinear interpolation of the data on this cell. Similar operations are done in lower dimensions.

    This class is most often used for either evaluating coefficients or right hand sides that are provided experimentally at a number of points inside the domain, or for comparing outputs of a solution on a finite element mesh against previously obtained data defined on a grid.

Note
If you have a problem where the points $x_i$ are not equally spaced (e.g., they result from a computation on a graded mesh that is denser closer to one boundary), then use the InterpolatedTensorProductGridData class instead.

    If a point is requested outside the box defined by the end points of the coordinate arrays, then the function is assumed to simply extend by constant values beyond the last data point in each coordinate direction. (The class does not throw an error if a point lies outside the box since it frequently happens that a point lies just outside the box by an amount on the order of numerical roundoff.)

    Note
    The use of this class is discussed in step-53.
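A construction sketch (my own, under the assumption that the constructor takes the interval endpoints, the number of subintervals, and a table of point data):

// 11 x 21 uniformly spaced data points on [0,1] x [0,2] (spacing 0.1)
std::array<std::pair<double, double>, 2> endpoints{{{0., 1.}, {0., 2.}}};
std::array<unsigned int, 2>              n_subintervals{{10, 20}};
Table<2, double> data(11, 21);
// ... fill data(i,j) with the value measured at (0.1*i, 0.1*j) ...
Functions::InterpolatedUniformGridData<2> f(endpoints, n_subintervals, data);
const double interpolated = f.value(Point<2>(0.35, 1.25));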

    Dealing with large data sets

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html 2024-11-15 06:44:17.083561304 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1LSingularityFunction.html 2024-11-15 06:44:17.083561304 +0000 @@ -229,7 +229,7 @@

    Detailed Description

    A function that solves the Laplace equation (with specific boundary values but zero right hand side) and that has a singularity at the center of the L-shaped domain in 2d (i.e., at the location of the re-entrant corner of this non-convex domain).

The function is given in polar coordinates by $r^{\frac{2}{3}} \sin(\frac{2}{3} \phi)$ with a singularity at the origin and should be used with GridGenerator::hyper_L(). Here, $\phi$ is defined as the clockwise angle against the positive $x$-axis.

    This function is often used to illustrate that the solutions of the Laplace equation

\[
  -\Delta u = 0
\]
/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html	2024-11-15 06:44:17.119561626 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1PointRestriction.html	2024-11-15 06:44:17.119561626 +0000
@@ -231,7 +231,7 @@

    Detailed Description

    template<int dim>
class Functions::PointRestriction< dim >

This class creates a 1-dimensional function from a dim + 1 dimensional function by restricting dim of the coordinate values to a given point. Mathematically this corresponds to taking a function, $f = f(x, y, z)$, and a point $(Y, Z)$, and defining a new function $g = g(x) = f(x, Y, Z)$. Using this class, this translates to

    Point<2> point(y, z);
    unsigned int open_direction = 0;
    @@ -240,7 +240,7 @@
The coordinates of the point will be expanded in the higher-dimensional function's coordinates starting from the open direction (and wrapping around). In particular, if we restrict to a point $(Z, X)$ and choose to keep the y-direction open, the restriction that is created is the function $g(y) = f(X, y, Z)$. This is consistent with the convention in BoundingBox::cross_section.

    Definition at line 109 of file function_restriction.h.

    Member Typedef Documentation

/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html 2024-11-15 06:44:17.155561947 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Polynomial.html 2024-11-15 06:44:17.155561947 +0000 @@ -332,8 +332,8 @@

Constructor. The coefficients and the exponents of the polynomial are passed as arguments. The Table<2, double> exponents has a number of rows equal to the number of monomials of the polynomial and a number of columns equal to dim. The i-th row of the exponents table contains the ${\alpha_{i,d}}$ exponents of the i-th monomial $a_{i}\prod_{d=1}^{dim} x_{d}^{\alpha_{i,d}}$. The i-th element of the coefficients vector contains the coefficient $a_{i}$ for the i-th monomial.

    Definition at line 2837 of file function_lib.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1RayleighKotheVortex.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1RayleighKotheVortex.html 2024-11-15 06:44:17.195562305 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1RayleighKotheVortex.html 2024-11-15 06:44:17.195562305 +0000 @@ -226,7 +226,7 @@

    Detailed Description

    template<int dim>
    class Functions::RayleighKotheVortex< dim >

    A class that represents a time-dependent function object for a Rayleigh–Kothe vortex vector field. This is generally used as flow pattern in complex test cases for interface tracking methods (e.g., volume-of-fluid and level-set approaches) since it leads to strong rotation and elongation of the fluid [Blais2013].

The stream function $\Psi$ of this Rayleigh-Kothe vortex is defined as:

\[
 \Psi = \frac{1}{\pi} \sin^2 (\pi x) \sin^2 (\pi y) \cos \left( \pi
 \frac{t}{T} \right)
\]
/usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html	2024-11-15 06:44:17.235562662 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Ellipsoid.html	2024-11-15 06:44:17.235562662 +0000
@@ -238,11 +238,11 @@

    template<int dim>
    class Functions::SignedDistance::Ellipsoid< dim >

    Signed-distance level set function to an ellipsoid defined by:

\[
 \sum_{i=1}^{dim} \frac{(x_i - c_i)^2}{R_i^2} = 1
\]

Here, $c_i$ are the coordinates of the center of the ellipsoid and $R_i$ are the elliptic radii. This function is zero on the ellipsoid, negative inside the ellipsoid and positive outside the ellipsoid.

    Definition at line 144 of file function_signed_distance.h.

    Member Typedef Documentation

    @@ -462,9 +462,9 @@

    Evaluates the ellipsoid function:

\[
 f(\vec{x}) = \sum_{i=1}^{dim} \frac{(x_i - c_i)^2}{R_i^2} - 1
\]

    Definition at line 200 of file function_signed_distance.cc.
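A minimal usage sketch, assuming the constructor takes the center and the array of elliptic radii as in current deal.II releases (the concrete ellipse is made up):

#include <deal.II/base/function_signed_distance.h>
#include <deal.II/base/point.h>

#include <array>

using namespace dealii;

int main()
{
  // Ellipse x^2/2^2 + y^2/1^2 = 1 centered at the origin.
  const Point<2>              center;
  const std::array<double, 2> radii{{2., 1.}};

  Functions::SignedDistance::Ellipsoid<2> ellipse(center, radii);

  // Negative inside, zero on the ellipse, positive outside:
  const double d = ellipse.value(Point<2>(3., 0.));  // = 1; closest point is (2,0)
  (void)d;
  return 0;
}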

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html 2024-11-15 06:44:17.271562984 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Plane.html 2024-11-15 06:44:17.271562984 +0000 @@ -226,7 +226,7 @@

    Detailed Description

    template<int dim>
class Functions::SignedDistance::Plane< dim >

Signed level set function of a plane in $\mathbb{R}^{dim}$: $\psi(x) = n \cdot (x - x_p)$. Here, $n$ is the plane normal and $x_p$ is a point in the plane. Thus, with respect to the direction of the normal, this function is positive above the plane, zero in the plane, and negative below the plane. If the normal is normalized, $\psi$ will be the signed distance to the closest point in the plane.

    Definition at line 104 of file function_signed_distance.h.
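A corresponding sketch for the plane (the point and normal below are made up):

#include <deal.II/base/function_signed_distance.h>
#include <deal.II/base/point.h>
#include <deal.II/base/tensor.h>

using namespace dealii;

int main()
{
  Tensor<1, 3> normal;
  normal[2] = 1.;  // unit normal e_z

  // Plane through the origin; psi(x) = n . (x - x_p).
  Functions::SignedDistance::Plane<3> plane(Point<3>(), normal);

  const double d = plane.value(Point<3>(0., 0., 2.));  // = 2 (above the plane)
  (void)d;
  return 0;
}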

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html 2024-11-15 06:44:17.311563341 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Rectangle.html 2024-11-15 06:44:17.311563341 +0000 @@ -226,7 +226,7 @@

    Detailed Description

    template<int dim>
    class Functions::SignedDistance::Rectangle< dim >

    Signed-distance level set function of a rectangle.

This function is zero on the rectangle, negative "inside" and positive in the rest of $\mathbb{R}^{dim}$.

    Contour surfaces of the signed distance function of a 3D rectangle are illustrated below:

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html 2024-11-15 06:44:17.343563626 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1Sphere.html 2024-11-15 06:44:17.343563626 +0000 @@ -226,9 +226,9 @@

    Detailed Description

    template<int dim>
class Functions::SignedDistance::Sphere< dim >

Signed-distance level set function of a sphere: $\psi(x) = \| x - x^c \| - R$. Here, $x^c$ is the center of the sphere and $R$ is its radius. This function is thus zero on the sphere, negative "inside" the ball having the sphere as its boundary, and positive in the rest of $\mathbb{R}^{dim}$.

This function has gradient and Hessian equal to $\partial_i \psi(x) = (x - x^c)/\| x - x^c \|$, $\partial_i \partial_j \psi = \delta_{ij}/\| x - x^c \| - (x_i - x_i^c)(x_j - x_j^c)/\| x - x^c \|^3$, where $\delta_{ij}$ is the Kronecker delta function.

    Definition at line 48 of file function_signed_distance.h.

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html 2024-11-15 06:44:17.383563984 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1SignedDistance_1_1ZalesakDisk.html 2024-11-15 06:44:17.383563984 +0000 @@ -227,8 +227,8 @@

    Detailed Description

    template<int dim>
    class Functions::SignedDistance::ZalesakDisk< dim >

    Signed-distance level set function of Zalesak's disk proposed in [zalesak1979fully].

It is calculated by the set difference $\psi(x) = \max(\psi_{S}(x), -\psi_{N}(x))$ of the level set functions of a sphere $\psi_{S}$ and a rectangle $\psi_{N}$. This function is zero on the surface of the disk, negative "inside" and positive in the rest of $\mathbb{R}^{dim}$.

    Contour surfaces of the signed distance function of a 3D Zalesak's disk are illustrated below:

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Spherical.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Spherical.html 2024-11-15 06:44:17.419564305 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1Spherical.html 2024-11-15 06:44:17.419564305 +0000 @@ -229,7 +229,7 @@

    Detailed Description

    template<int dim>
class Functions::Spherical< dim >

An abstract base class for a scalar-valued function $f=f(r,\theta,\phi)$ defined in spherical coordinates. This class wraps the transformation of values, gradients and Hessians from spherical coordinates to the Cartesian coordinate system used by the Function base class. Therefore derived classes only need to implement those functions in spherical coordinates (specifically svalue(), sgradient() and shessian()). The convention for angles is the same as in GeometricUtilities::Coordinates.

    Note
This function is currently only implemented for dim==3.

    Definition at line 43 of file function_spherical.h.

    @@ -528,7 +528,7 @@

    Return the gradient in spherical coordinates.

The returned object should contain derivatives in the following order: $\{ f_{,r},\, f_{,\theta},\, f_{,\phi}\}$.

    Definition at line 329 of file function_spherical.cc.

    @@ -559,8 +559,8 @@

    Return the Hessian in spherical coordinates.

The returned object should contain derivatives in the following order: $\{ f_{,rr},\, f_{,\theta\theta},\, f_{,\phi\phi},\, f_{,r\theta},\, f_{,r\phi},\, f_{,\theta\phi}\}$.

    Definition at line 340 of file function_spherical.cc.
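To make the division of labor concrete, here is a hedged sketch of a derived class (the name RadiusSquared and the function $f=r^2$ are invented): value() works once svalue() is overridden; gradient() and hessian() would additionally require sgradient() and shessian() with the orderings given above.

#include <deal.II/base/function_spherical.h>
#include <deal.II/base/point.h>

#include <array>

using namespace dealii;

class RadiusSquared : public Functions::Spherical<3>
{
private:
  // sp = {r, theta, phi}, using the GeometricUtilities::Coordinates convention.
  virtual double svalue(const std::array<double, 3> &sp,
                        const unsigned int /*component*/) const override
  {
    return sp[0] * sp[0];
  }
};

int main()
{
  RadiusSquared f;
  const double v = f.value(Point<3>(1., 2., 2.));  // r = 3, so v = 9
  (void)v;
  return 0;
}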

    /usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html 2024-11-15 06:44:17.467564734 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classFunctions_1_1StokesLSingularity.html 2024-11-15 06:44:17.467564734 +0000 @@ -275,7 +275,7 @@

    Detailed Description

    A singular solution to Stokes' equations on a 2d L-shaped domain.

This function satisfies $-\triangle \mathbf{u} + \nabla p = 0$ and represents a typical singular solution around a reentrant corner of an L-shaped domain that can be created using GridGenerator::hyper_L(). The velocity vanishes on the two faces of the re-entrant corner and $\nabla\mathbf{u}$ and $p$ are singular at the origin while they are smooth in the rest of the domain because they can be written as a product of a smooth function and the term $r^{\lambda-1}$ where $r$ is the radius and $\lambda \approx 0.54448$ is a fixed parameter.

Taken from Houston, Schötzau, Wihler, proceedings of ENUMATH 2003.

    Definition at line 245 of file flow_function.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html 2024-11-15 06:44:17.503565055 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classGridIn.html 2024-11-15 06:44:17.507565092 +0000 @@ -923,7 +923,7 @@
    Returns
    This function returns a struct containing some extra data stored by the ExodusII file that cannot be loaded into a Triangulation - see ExodusIIData for more information.
A cell face in ExodusII can be in an arbitrary number of sidesets (i.e., it can have an arbitrary number of sideset ids) - however, a boundary cell face in deal.II has exactly one boundary id. All boundary faces that are not in a sideset are given the (default) boundary id of $0$. This function then groups sidesets together into unique sets and gives each one a boundary id. For example: Consider a single-quadrilateral mesh whose left side has no sideset id, right side has sideset ids $0$ and $1$, and whose bottom and top sides have sideset ids of $0$. The left face will have a boundary id of $0$, the top and bottom faces boundary ids of $1$, and the right face a boundary id of $2$. Hence the vector returned by this function in that case will be $\{\{\}, \{0\}, \{0, 1\}\}$.

    Definition at line 3772 of file grid_in.cc.
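A hedged sketch of how the returned data could be inspected (the file name mesh.e is a placeholder; this requires deal.II built with ExodusII support):

#include <deal.II/grid/grid_in.h>
#include <deal.II/grid/tria.h>

#include <iostream>

using namespace dealii;

int main()
{
  Triangulation<3> tria;
  GridIn<3>        grid_in(tria);

  const auto data = grid_in.read_exodusii("mesh.e");  // hypothetical file name

  // data.id_to_sideset_ids[b] lists the sideset ids grouped into boundary
  // id b, e.g. {{}, {0}, {0, 1}} in the single-quadrilateral example above.
  for (unsigned int b = 0; b < data.id_to_sideset_ids.size(); ++b)
    std::cout << "boundary id " << b << ": "
              << data.id_to_sideset_ids[b].size() << " sideset id(s)\n";
  return 0;
}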

    /usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html 2024-11-15 06:44:17.523565234 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classHouseholder.html 2024-11-15 06:44:17.523565234 +0000 @@ -150,10 +150,10 @@

    Detailed Description

    template<typename number>
    class Householder< number >

    QR-decomposition of a full matrix.

This class computes the QR-decomposition of a given matrix by the Householder algorithm. Then, the function least_squares() can be used to compute the vector $x$ minimizing $\|Ax-b\|$ for a given vector $b$. The QR decomposition of $A$ is useful for this purpose because the minimizer is given by the equation $x=(A^TA)^{-1}A^Tb=(R^TQ^TQR)^{-1}R^TQ^Tb$ which is easy to compute because $Q$ is an orthogonal matrix, and consequently $Q^TQ=I$. Thus, $x=(R^TR)^{-1}R^TQ^Tb=R^{-1}R^{-T}R^TQ^Tb=R^{-1}Q^Tb$. Furthermore, $R$ is triangular, so applying $R^{-1}$ to a vector only involves a backward or forward solve.

    Implementation details

The class does not in fact store the $Q$ and $R$ factors explicitly as matrices. It does store $R$, but the $Q$ factor is stored as the product of Householder reflections of the form $Q_i = I-v_i v_i^T$ where the vectors $v_i$ are such that they can be stored in the lower-triangular part of an underlying matrix object, whereas $R$ is stored in the upper triangular part.

The $v_i$ vectors and the $R$ matrix now are in conflict because they both want to use the diagonal entry of the matrix, but we can only store one in these positions, of course. Consequently, the entries $(v_i)_i$ are stored separately in the diagonal member variable.

    Note
    Instantiations for this template are provided for <float> and <double>; others can be generated in application programs (see the section on Template instantiations in the manual).

    Definition at line 79 of file householder.h.
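A minimal least-squares sketch using this decomposition (the 3x2 system below is made up; least_squares() returns the norm of the residual):

#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/householder.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

int main()
{
  FullMatrix<double> A(3, 2);  // overdetermined 3x2 system
  A(0, 0) = 1.;  A(0, 1) = 0.;
  A(1, 0) = 0.;  A(1, 1) = 1.;
  A(2, 0) = 1.;  A(2, 1) = 1.;

  Vector<double> b(3), x(2);
  b(0) = 1.;  b(1) = 1.;  b(2) = 1.;

  Householder<double> qr(A);
  const double residual = qr.least_squares(x, b);  // minimizes ||Ax - b||
  (void)residual;
  return 0;
}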

    /usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html 2024-11-15 06:44:17.539565377 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classIdentityMatrix.html 2024-11-15 06:44:17.539565377 +0000 @@ -147,13 +147,13 @@  

    Detailed Description

Implementation of a simple class representing the identity matrix of a given size, i.e. a matrix with entries $A_{ij}=\delta_{ij}$. While it has the most important ingredients of a matrix, in particular that one can ask for its size and perform matrix-vector products with it, a matrix of this type is really only useful in two contexts: preconditioning and initializing other matrices.

    Initialization

The main usefulness of this class lies in its ability to initialize other matrices, like this:

FullMatrix<double> identity (IdentityMatrix(10));

This creates a $10\times 10$ matrix with ones on the diagonal and zeros everywhere else. Most matrix types, in particular FullMatrix and SparseMatrix, have conversion constructors and assignment operators for IdentityMatrix, and can therefore be filled rather easily with identity matrices.

    Preconditioning

No preconditioning at all is equivalent to preconditioning with the identity matrix. deal.II has a specialized class for this purpose, PreconditionIdentity, that can be used in a context as shown in the documentation of that class. The present class can be used in much the same way, although without any additional benefit:

SolverControl solver_control (1000, 1e-12);
SolverCG<> cg (solver_control);
cg.solve (system_matrix, solution, system_rhs,
          IdentityMatrix(solution.size()));
    /usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html 2024-11-15 06:44:17.563565591 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classImplicitQR.html 2024-11-15 06:44:17.563565591 +0000 @@ -181,7 +181,7 @@

    Detailed Description

    template<typename VectorType>
class ImplicitQR< VectorType >

A class to obtain the triangular $R$ matrix of the $A=QR$ factorization together with the matrix $A$ itself. The orthonormal matrix $Q$ is not stored explicitly, hence the name of the class. The multiplication with $Q$ can be represented as $Q=A R^{-1}$, whereas the multiplication with $Q^T$ is given by $Q^T=R^{-T}A^T$.

    The class is designed to update a given (possibly empty) QR factorization due to the addition of a new column vector. This is equivalent to constructing an orthonormal basis by the Gram-Schmidt procedure. The class also provides update functionality when the column is removed.

The VectorType template argument may be either a parallel or a serial vector, and only needs to have basic operations such as additions, scalar product, etc. It also needs to have a copy-constructor.

    @@ -346,7 +346,7 @@
Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

    Implements BaseQR< VectorType >.

    @@ -380,7 +380,7 @@
Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implements BaseQR< VectorType >.

    @@ -414,7 +414,7 @@
Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

    Implements BaseQR< VectorType >.

    @@ -448,7 +448,7 @@
Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implements BaseQR< VectorType >.
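A hedged sketch of the update workflow (the vector entries are made up; append_column() reports whether the new column was accepted as linearly independent):

#include <deal.II/lac/qr.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

int main()
{
  ImplicitQR<Vector<double>> qr;

  Vector<double> column(3);
  column(0) = 1.;  column(1) = 1.;  column(2) = 0.;
  qr.append_column(column);  // grows the factorization by one column

  Vector<double> x(qr.size()), y(3);
  x(0) = 2.;
  qr.multiply_with_Q(y, x);  // y = Q x
  return 0;
}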

    /usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html 2024-11-15 06:44:17.603565949 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classIndexSet.html 2024-11-15 06:44:17.603565949 +0000 @@ -863,7 +863,7 @@
Return whether the IndexSets are ascending with respect to MPI process number and 1:1, i.e., each index is contained in exactly one IndexSet (among those stored on the different processes), each process stores a contiguous subset of indices, and the index set on process $p+1$ starts at the index one larger than the last one stored on process $p$. In case there is only one MPI process, this just means that the IndexSet is complete.

    Definition at line 1130 of file index_set.cc.

    @@ -1158,7 +1158,7 @@

This command takes a "mask", i.e., a second index set of the same size as the current one, and returns the intersection of the current index set and the mask, shifted to the index of an entry within the given mask. For example, if the current object is an IndexSet object representing an index space [0,100) containing indices [20,40), and if the mask represents an index space of the same size but containing all 50 odd indices in this range, then the result will be an index set for a space of size 50 that contains those indices that correspond to the question "the how many'th entry in the mask are the indices [20,40)?". This will result in an index set of size 50 that contains the indices {11,12,13,14,15,16,17,18,19,20} (because, for example, the index 20 in the original set is not in the mask, but 21 is and corresponds to the 11th entry of the mask – the mask contains the elements {1,3,5,7,9,11,13,15,17,19,21,...}).

    In other words, the result of this operation is the intersection of the set represented by the current object and the mask, as seen within the mask. This corresponds to the notion of a view: The mask is a window through which we see the set represented by the current object.

A typical case where this function is useful is as follows. Say, you have a block linear system in which you have blocks corresponding to variables $(u,p,T,c)$ (which you can think of as velocity, pressure, temperature, and chemical composition – or whatever other kind of problem you are currently considering in your own work). We solve this in parallel, so every MPI process has its own locally_owned_dofs index set that describes which among all $N_\text{dofs}$ degrees of freedom this process owns. Let's assume we have developed a linear solver or preconditioner that first solves the coupled $u$- $T$ system, and once that is done, solves the $p$- $c$ system. In this case, it is often useful to set up block vectors with only two components corresponding to the $u$ and $T$ components, and later for only the $p$- $c$ components of the solution. The question is which of the components of these 2-block vectors are locally owned? The answer is that we need to get a view of the locally_owned_dofs index set in which we apply a mask that corresponds to the variables we're currently interested in. For the $u$- $T$ system, we need a mask (corresponding to an index set of size $N_\text{dofs}$) that contains all indices of $u$ degrees of freedom as well as all indices of $T$ degrees of freedom. The resulting view is an index set of size $N_u+N_T$ that contains the indices of the locally owned $u$ and $T$ degrees of freedom.

    Definition at line 308 of file index_set.cc.
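A hedged sketch of the worked example above, assuming the mask overload of get_view():

#include <deal.II/base/index_set.h>

using namespace dealii;

int main()
{
  IndexSet set(100);
  set.add_range(20, 40);

  IndexSet mask(100);  // all odd indices in [0,100)
  for (unsigned int i = 1; i < 100; i += 2)
    mask.add_index(i);

  const IndexSet view = set.get_view(mask);
  // view has size 50 and contains {11,...,20}, as described above.
  return 0;
}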

    @@ -1198,7 +1198,7 @@
Remove all elements contained in other from this set. In other words, if $x$ is the current object and $o$ the argument, then we compute $x \leftarrow x \backslash o$.

    Definition at line 473 of file index_set.cc.

    @@ -1943,7 +1943,7 @@
Create and return an index set of size $N$ that contains every single index within this range. In essence, this function returns an index set created by

IndexSet is (N);
is.add_range(0, N);

    This function exists so that one can create and initialize index sets that are complete in one step, or so one can write code like

if (my_index_set == complete_index_set(my_index_set.size()))
    /usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html 2024-11-15 06:44:17.635566235 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classIntegratedLegendreSZ.html 2024-11-15 06:44:17.635566235 +0000 @@ -239,13 +239,13 @@

    Class implementing the integrated Legendre polynomials described in the PhD thesis of Sabine Zaglmayr.

    This class was written based upon the existing deal.II Legendre class as a base, but with the coefficients adjusted so that the recursive formula is for the integrated Legendre polynomials described in the PhD thesis of Sabine Zaglmayr. The polynomials can be generated recursively from:

• $L_{0}(x) = -1$ (added so that it can be generated recursively from 0)
• $L_{1}(x) = x$
• $L_{2}(x) = \frac{(x^2 - 1)}{2}$
• $(n+1)L_{n+1} = (2n-1)\,x\,L_{n} - (n-2)L_{n-1}$.

    However, it is also possible to generate them directly from the Legendre polynomials:

$L_{n} = \frac{l_{n} - l_{n-2}}{2n-1}$

    Definition at line 46 of file polynomials_integrated_legendre_sz.h.

    Member Typedef Documentation

    @@ -1228,7 +1228,7 @@
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

/usr/share/doc/packages/dealii/doxygen/deal.II/classKellyErrorEstimator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classKellyErrorEstimator.html 2024-11-15 06:44:17.667566521 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classKellyErrorEstimator.html 2024-11-15 06:44:17.667566521 +0000 @@ -174,7 +174,7 @@

\[
 \eta^2 = \sum_K \eta_K^2
\]

so that $\eta \approx \|\nabla (u-u_h)\|$ for the Laplace equation. The functions of this class compute a vector of values that corresponds to $\eta_K$ (i.e., the square root of the quantity above).

    In the paper of Ainsworth $ c_F=\frac {h_K}{24} $, but this factor is a bit esoteric, stemming from interpolation estimates and stability constants which may hold for the Poisson problem, but may not hold for more general situations. Alternatively, we consider the case when $c_F=\frac {h_F}{2p_F}$, where $h_F$ is the diameter of the face and $p_F=max(p^+,p^-)$ is the maximum polynomial degree of adjacent elements; or $c_F=h_K$. The choice between these factors is done by means of the enumerator, provided as the last argument in all functions.

    To perform the integration, use is made of the FEFaceValues and FESubfaceValues classes. The integration is performed by looping over all cells and integrating over faces that are not yet treated. This way we avoid integration on faces twice, once for each time we visit one of the adjacent cells. In a second loop over all cells, we sum up the contributions of the faces (which are the integrated square of the jumps times some factor) of each cell and take the square root.

    The integration is done using a quadrature formula on the face provided by the caller of the estimate() functions declared by this class. For linear trial functions (FE_Q(1)), QGauss with two points or even the QMidpoint rule might actually suffice. For higher order elements, it is necessary to utilize higher order quadrature formulae with fe.degree+1 Gauss points.

/usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html 2024-11-15 06:44:17.743567199 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLAPACKFullMatrix.html 2024-11-15 06:44:17.747567235 +0000 @@ -2122,7 +2122,7 @@ FullMatrix< number > & eigenvectors

Compute eigenvalues and eigenvectors of a real symmetric matrix. Only eigenvalues in the interval $(\rm{lower\_bound}, \rm{upper\_bound}]$ are computed with the absolute tolerance $\rm abs\_accuracy$. An approximate eigenvalue is accepted as converged when it is determined to lie in an interval $[a,b]$ of width less than or equal to $\rm{abs\_accuracy} + eps * \rm{max}(|a|,|b|)$, where $eps$ is the machine precision. If $\rm{abs\_accuracy}$ is less than or equal to zero, then $eps\,|\mathbf{T}|_1$ will be used in its place, where $|\mathbf{T}|_1$ is the 1-norm of the tridiagonal matrix obtained by reducing $\mathbf A$ to tridiagonal form. Eigenvalues will be computed most accurately when $\rm{abs\_accuracy}$ is set to twice the underflow threshold, not zero. After this routine has been called, all eigenvalues in $(\rm{lower\_bound}, \rm{upper\_bound}]$ will be stored in eigenvalues and the corresponding eigenvectors will be stored in the columns of eigenvectors, whose dimension is set accordingly.

    Note
    Calls the LAPACK function Xsyevx.
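A small sketch of a call (the 2x2 matrix is made up; its eigenvalues 1 and 3 lie in the requested interval):

#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/lapack_full_matrix.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

int main()
{
  LAPACKFullMatrix<double> A(2, 2);
  A(0, 0) = 2.;  A(0, 1) = 1.;
  A(1, 0) = 1.;  A(1, 1) = 2.;

  Vector<double>     eigenvalues;
  FullMatrix<double> eigenvectors;
  A.compute_eigenvalues_symmetric(/*lower_bound=*/0.,
                                  /*upper_bound=*/10.,
                                  /*abs_accuracy=*/0.,
                                  eigenvalues,
                                  eigenvectors);
  return 0;
}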
    @@ -2180,8 +2180,8 @@
  • itype = 2: $\mathbf A \cdot \mathbf B \cdot \mathbf x=\lambda \mathbf x$
  • itype = 3: $\mathbf B \cdot \mathbf A \cdot \mathbf x=\lambda \mathbf x$
where $\mathbf A$ is this matrix. $\mathbf A$ and $\mathbf B$ are assumed to be symmetric, and $\mathbf B$ has to be positive definite. Only eigenvalues in the interval $(\rm{lower\_bound}, \rm{upper\_bound}]$ are computed with the absolute tolerance $\rm{abs\_accuracy}$. An approximate eigenvalue is accepted as converged when it is determined to lie in an interval $[a,b]$ of width less than or equal to $\rm{abs\_accuracy} + eps * \rm{max}( |a|,|b| )$, where $eps$ is the machine precision. If $\rm{abs\_accuracy}$ is less than or equal to zero, then $eps \, |\mathbf{T}|_1$ will be used in its place, where $|\mathbf{T}|_1$ is the 1-norm of the tridiagonal matrix obtained by reducing $\mathbf A$ to tridiagonal form. Eigenvalues will be computed most accurately when $\rm{abs\_accuracy}$ is set to twice the underflow threshold, not zero. After this routine has been called, all eigenvalues in $(\rm{lower\_bound}, \rm{upper\_bound}]$ will be stored in eigenvalues and the corresponding eigenvectors will be stored in eigenvectors, whose dimension is set accordingly.

    Note
    Calls the LAPACK function Xsygvx.

    Definition at line 2235 of file lapack_full_matrix.cc.

    @@ -3861,7 +3861,7 @@
The matrix $\mathbf U$ in the singular value decomposition $\mathbf U \cdot \mathbf S \cdot \mathbf V^T$.

    Definition at line 988 of file lapack_full_matrix.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html 2024-11-15 06:44:17.783567556 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1CUDAWrappers_1_1Vector.html 2024-11-15 06:44:17.783567556 +0000 @@ -934,7 +934,7 @@
Return the square of the $l_2$-norm.

    Definition at line 492 of file cuda_vector.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html 2024-11-15 06:44:17.835568021 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1ReadWriteVector.html 2024-11-15 06:44:17.835568021 +0000 @@ -323,7 +323,7 @@

    Detailed Description

    template<typename Number>
class LinearAlgebra::ReadWriteVector< Number >

ReadWriteVector is intended to represent vectors in ${\mathbb R}^N$ for which it stores all or a subset of elements. The latter case is important in parallel computations, where $N$ may be so large that no processor can actually store all elements of a solution vector, but where this is also not necessary: one typically only has to store the values of degrees of freedom that live on cells that are locally owned plus potentially those degrees of freedom that live on ghost cells.

    This class provides access to individual elements to be read or written. However, it does not allow global operations such as taking the norm or dot products between vectors.

    Storing elements

    Most of the time, one will simply read from or write into a vector of the current class using the global numbers of these degrees of freedom. This is done using operator()() or operator[]() which call global_to_local() to transform the global index into a local one. In such cases, it is clear that one can only access elements of the vector that the current object indeed stores.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockSparseMatrix.html 2024-11-15 06:44:17.899568592 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockSparseMatrix.html 2024-11-15 06:44:17.899568592 +0000 @@ -1010,7 +1010,7 @@
Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix. The vector types can be block vectors or non-block vectors (only if the matrix has only one row or column, respectively), and need to define TrilinosWrappers::SparseMatrix::vmult.

    Definition at line 357 of file trilinos_tpetra_block_sparse_matrix.h.

    @@ -1042,7 +1042,7 @@
Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 372 of file trilinos_tpetra_block_sparse_matrix.h.

    @@ -2036,7 +2036,7 @@
Adding matrix-vector multiplication: add $M*src$ to $dst$, with $M$ being this matrix.

    @@ -2141,7 +2141,7 @@
Compute the matrix scalar product $\left(u,Mv\right)$.

    @@ -2610,7 +2610,7 @@
Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    @@ -2718,7 +2718,7 @@
Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockVector.html 2024-11-15 06:44:17.955569093 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1BlockVector.html 2024-11-15 06:44:17.955569093 +0000 @@ -1588,7 +1588,7 @@
Return the square of the $l_2$-norm.

    @@ -1640,7 +1640,7 @@
Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    @@ -1666,7 +1666,7 @@
Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    @@ -1692,7 +1692,7 @@
Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparseMatrix.html 2024-11-15 06:44:18.015569629 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparseMatrix.html 2024-11-15 06:44:18.015569629 +0000 @@ -1845,7 +1845,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    The implementation of this function is not as efficient as the one in the SparseMatrix class used in deal.II (i.e. the original one, not the Trilinos wrapper class) since Trilinos doesn't support this operation and needs a temporary vector.

    The vector has to be initialized with the same IndexSet the matrix was initialized with.

@@ -1870,7 +1870,7 @@ const Vector< Number, MemorySpace > & v

Compute the matrix scalar product $\left(u,Mv\right)$.

    The implementation of this function is not as efficient as the one in the SparseMatrix class used in deal.II (i.e. the original one, not the Trilinos wrapper class) since Trilinos doesn't support this operation and needs a temporary vector.

    The vector u has to be initialized with the same IndexSet that was used for the row indices of the matrix and the vector v has to be initialized with the same IndexSet that was used for the column indices of the matrix.

    In case of a localized Vector, this function will only work when running on one processor, since the matrix object is inherently distributed. Otherwise, an exception will be thrown.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparsityPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparsityPattern.html 2024-11-15 06:44:18.067570093 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1TpetraWrappers_1_1SparsityPattern.html 2024-11-15 06:44:18.067570093 +0000 @@ -477,7 +477,7 @@
Generate a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally, too.

It is possible to specify the number of column entries per row using the optional n_entries_per_row argument. However, this value does not need to be accurate or even given at all, since one does usually not have this kind of information before building the sparsity pattern (the usual case when the function DoFTools::make_sparsity_pattern() is called). The entries are allocated dynamically in a similar manner as for the deal.II DynamicSparsityPattern classes. However, a good estimate will reduce the setup time of the sparsity pattern.

    Definition at line 102 of file trilinos_tpetra_sparsity_pattern.cc.

    @@ -509,7 +509,7 @@
Generate a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally, too.

The vector n_entries_per_row specifies the number of entries in each row (information that is usually not available beforehand, though).

    Definition at line 113 of file trilinos_tpetra_sparsity_pattern.cc.

    @@ -799,7 +799,7 @@
Initialize a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally.

The number of column entries per row is specified as the maximum number of entries argument. This does not need to be an accurate number since the entries are allocated dynamically in a similar manner as for the deal.II DynamicSparsityPattern classes, but a good estimate will reduce the setup time of the sparsity pattern.

    Definition at line 234 of file trilinos_tpetra_sparsity_pattern.cc.

    @@ -831,7 +831,7 @@
Initialize a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally.

    The vector n_entries_per_row specifies the number of entries in each row.

    Definition at line 248 of file trilinos_tpetra_sparsity_pattern.cc.

    @@ -1372,7 +1372,7 @@
Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$.

    Definition at line 865 of file trilinos_tpetra_sparsity_pattern.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 2024-11-15 06:44:18.139570736 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1BlockVector.html 2024-11-15 06:44:18.139570736 +0000 @@ -1357,7 +1357,7 @@
$l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

@@ -1576,7 +1576,7 @@ const bool symmetric = false

Calculate the scalar product between each block of this vector and V and store the result in a full matrix matrix. This function computes the result by forming $A_{ij}=U_i \cdot V_j$ where $U_i$ and $V_j$ indicate the $i$th block (not element!) of $U$ and the $j$th block of $V$, respectively. If symmetric is true, it is assumed that inner product results in a square symmetric matrix and almost half of the scalar products can be avoided.

    Obviously, this function can only be used if all blocks of both vectors are of the same size.

    Note
    Internally, a single global reduction will be called to accumulate scalar product between locally owned degrees of freedom.
@@ -1606,7 +1606,7 @@ const bool symmetric = false

Calculate the scalar product between each block of this vector and V using a metric tensor matrix. This function computes the result of $ \sum_{ij} A^{ij} U_i \cdot V_j$ where $U_i$ and $V_j$ indicate the $i$th block (not element) of $U$ and the $j$th block of $V$, respectively. If symmetric is true, it is assumed that $U_i \cdot V_j$ and $A^{ij}$ are symmetric matrices and almost half of the scalar products can be avoided.

    Obviously, this function can only be used if all blocks of both vectors are of the same size.

    Note
    Internally, a single global reduction will be called to accumulate the scalar product between locally owned degrees of freedom.
@@ -1641,7 +1641,7 @@ const Number b = Number(1.)

Set each block of this vector as follows: $V^i = s V^i + b \sum_{j} U_j A^{ji}$ where $V^i$ and $U_j$ indicate the $i$th block (not element) of $V$ and the $j$th block of $U$, respectively.

    Obviously, this function can only be used if all blocks of both vectors are of the same size.

    @@ -1845,7 +1845,7 @@
Return the $l_2$ norm of the vector (i.e., the square root of the sum of the square of all entries among all processors).

    @@ -1865,7 +1865,7 @@
Return the square of the $l_2$ norm of the vector.

/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html 2024-11-15 06:44:18.207571344 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classLinearAlgebra_1_1distributed_1_1Vector.html 2024-11-15 06:44:18.207571344 +0000 @@ -1034,7 +1034,7 @@ const MPI_Comm comm_sm = MPI_COMM_SELF

Initialize vector with local_size locally-owned and ghost_size ghost degrees of freedom.

The optional argument comm_sm, which consists of processes on the same shared-memory domain, allows users to have read-only access to both locally-owned and ghost values of processes combined in the shared-memory communicator. See the general documentation of this class for more information about this argument.

Note
In the created underlying partitioner, the local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively. Setting the ghost_size variable to an appropriate value provides memory space for the ghost data in a vector's memory allocation and allows access to it via local_element(). However, the associated global indices must be handled externally in this case.
    @@ -1757,7 +1757,7 @@
Return the $l_2$ norm of the vector (i.e., the square root of the sum of the square of all entries among all processors).

    @@ -1777,7 +1777,7 @@
Return the square of the $l_2$ norm of the vector.

    @@ -2453,7 +2453,7 @@
$l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

/usr/share/doc/packages/dealii/doxygen/deal.II/classLinearOperator.html differs (HTML document, ASCII text, with very long lines)
    std::function< void(Range &v, bool omit_zeroing_entries)> reinit_range_vector

that store the knowledge of how to initialize (resize + internal data structures) an arbitrary vector of the Range and Domain space.

    The primary purpose of this class is to provide syntactic sugar for complex matrix-vector operations and free the user from having to create, set up and handle intermediate storage locations by hand.

As an example, consider the operation $(A+k\,B)\,C$, where $A$, $B$, and $C$ denote (possibly different) matrices. In order to construct a LinearOperator op that stores the knowledge of this operation, one can write:

#include <deal.II/lac/linear_operator_tools.h>
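The rest of the snippet did not survive extraction; the following is a reconstruction along the lines of the LinearOperator documentation, using the linear_operator() factory (a sketch, with setup and assembly of the matrices omitted):

dealii::SparseMatrix<double> A, B, C;
const double k = 2.0; // example scaling factor

// Wrap the matrices and compose the operation (A + k B) C lazily;
// no intermediate matrix is ever formed.
const auto op_a = dealii::linear_operator(A);
const auto op_b = dealii::linear_operator(B);
const auto op_c = dealii::linear_operator(C);

const auto op = (op_a + k * op_b) * op_c;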
/usr/share/doc/packages/dealii/doxygen/deal.II/classManifold.html differs (HTML document, ASCII text, with very long lines)

In the most essential use of manifolds, manifold descriptions are used to create a "point between other points". For example, when a triangulation creates a new vertex on a cell, face, or edge, it determines the new vertex's coordinates through the following function call:

    ...
    Point<spacedim> new_vertex = manifold.get_new_point (points,weights);
    ...
Here, points is a collection of points in spacedim dimensions, and weights is a collection of corresponding weights. The points in this context will then be the vertices of the cell, face, or edge, and the weights are typically one over the number of points when a new midpoint of the cell, face, or edge is needed. Derived classes then will implement the Manifold::get_new_point() function in a way that computes the location of this new point. In the simplest case, for example in the FlatManifold class, the function simply computes the arithmetic average (with given weights) of the given points. However, other classes do something differently; for example, the SphericalManifold class, which is used to describe domains that form (part of) the sphere, will ensure that, given the two vertices of an edge at the boundary, the new returned point will lie on the great circle that connects the two points, rather than choosing a point that is half-way between the two points in ${\mathbb R}^d$.

    Note
    Unlike almost all other cases in the library, we here interpret the points to be in real space, not on the reference cell.

    Manifold::get_new_point() has a default implementation that can simplify this process somewhat: Internally, the function calls the Manifold::get_intermediate_point() to compute pair-wise intermediate points. Internally the Manifold::get_intermediate_point() calls the Manifold::project_to_manifold() function after computing the convex combination of the given points. This allows derived classes to only overload Manifold::project_to_manifold() for simple situations. This is often useful when describing manifolds that are embedded in higher dimensional space, e.g., the surface of a sphere. In those cases, the desired new point may be computed simply by the (weighted) average of the provided points, projected back out onto the sphere.
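A minimal sketch (hypothetical class) of this pattern for the surface of the unit sphere: only project_to_manifold() is overridden, and the default get_new_point() takes care of the weighted averaging:

#include <deal.II/grid/manifold.h>

#include <memory>

using namespace dealii;

class UnitSphereManifold : public Manifold<3>
{
public:
  virtual Point<3>
  project_to_manifold(const ArrayView<const Point<3>> &surrounding_points,
                      const Point<3> &candidate) const override
  {
    (void)surrounding_points;            // the radius is fixed, so unused here
    return candidate / candidate.norm(); // push the average back onto the sphere
  }

  virtual std::unique_ptr<Manifold<3>>
  clone() const override
  {
    return std::make_unique<UnitSphereManifold>();
  }
};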

    Common use case: Computing tangent vectors

The second use of this class is in computing directions on domains and boundaries. For example, we may need to compute the normal vector to a face in order to impose the no-flow boundary condition $\mathbf u \cdot \mathbf n = 0$ (see VectorTools::compute_no_normal_flux_constraints() as an example). Similarly, we may need normal vectors in the computation of the normal component of the gradient of the numerical solution in order to compute the jump in the gradient of the solution in error estimators (see, for example, the KellyErrorEstimator class).

    To make this possible, the Manifold class provides a member function (to be implemented by derived classes) that computes a "vector tangent to the manifold at one point, in direction of another point" via the Manifold::get_tangent_vector() function. For example, in 2d, one would use this function with the two vertices of an edge at the boundary to compute a "tangential" vector along the edge, and then get the normal vector by rotation by 90 degrees. In 3d, one would compute the two vectors "tangential" to the two edges of a boundary face adjacent to a boundary vertex, and then take the cross product of these two to obtain a vector normal to the boundary.

    For reasons that are more difficult to understand, these direction vectors are normalized in a very specific way, rather than to have unit norm. See the documentation of Manifold::get_tangent_vector(), as well as below, for more information.
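As a small sketch of the 2d case just described (the helper function and its setup are hypothetical; get_tangent_vector() is the documented member):

#include <deal.II/grid/manifold.h>

using namespace dealii;

Tensor<1, 2> boundary_normal(const Manifold<2> &manifold,
                             const Point<2>    &x1,
                             const Point<2>    &x2)
{
  // Tangent to the geodesic from x1 towards x2, evaluated at x1.
  const Tensor<1, 2> t = manifold.get_tangent_vector(x1, x2);

  // Rotate by 90 degrees and normalize: get_tangent_vector() does not
  // return unit vectors, since its norm encodes the geodesic's length.
  Tensor<1, 2> n;
  n[0] = -t[1];
  n[1] = t[0];
  return n / n.norm();
}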


    A unified description

    The "real" way to understand what this class does is to see it in the framework of differential geometry. More specifically, differential geometry is fundamentally based on the assumption that two sufficiently close points are connected via a line of "shortest distance". This line is called a "geodesic", and it is selected from all other lines that connect the two points by the property that it is shortest if distances are measured in terms of the "metric" that describes a manifold. To give examples, recall that the geodesics of a flat manifold (implemented in the FlatManifold class) are simply the straight lines connecting two points, whereas for spherical manifolds (see the SphericalManifold class) geodesics between two points of same distance are the grand circles, and are in general curved lines when connecting two lines of different distance from the origin.

    In the following discussion, and for the purposes of implementing the current class, the concept of "metrics" that is so fundamental to differential geometry is no longer of great importance to us. Rather, everything can simply be described by postulating the existence of geodesics connecting points on a manifold.

Given geodesics, the operations discussed in the previous two sections can be described in a more formal way. In essence, they rely on the fact that we can assume that a geodesic is parameterized by a "time"-like variable $t$ so that $\mathbf s(t)$ describes the curve and so that $\mathbf s(0)$ is the location of the first and $\mathbf s(1)$ the location of the second point. Furthermore, $\mathbf s(t)$ traces out the geodesic at constant speed, covering equal distance in equal time (as measured by the metric). Note that this parameterization uses time, not arc length, to denote progress along the geodesic.

In this picture, computing a mid-point between points $\mathbf x_1$ and $\mathbf x_2$, with weights $w_1$ and $w_2=1-w_1$, simply requires computing the point $\mathbf s(w_1)$. Computing a new point as a weighted average of more than two points can be done by considering pairwise geodesics, finding suitable points on the geodesic between the first two points, then on the geodesic between this new point and the third given point, etc.

Likewise, the "tangential" vector described above is simply the velocity vector, $\mathbf s'(t)$, evaluated at one of the end points of a geodesic (i.e., at $t=0$ or $t=1$). In the case of a flat manifold, the geodesic is simply the straight line connecting two points, and the velocity vector is just the connecting vector in that case. On the other hand, for two points on a spherical manifold, the geodesic is a great circle, and the velocity vector is tangent to the spherical surface.

Note that if we wanted to, we could use this to compute the length of the geodesic that connects two points $\mathbf x_1$ and $\mathbf x_2$ by computing $\int_0^1 \|\mathbf s'(t)\| dt$ along the geodesic that connects them, but this operation will not be of use to us in practice. One could also conceive computing the direction vector using the "new point" operation above, using the formula $\mathbf s'(0)=\lim_{w\rightarrow 0} \frac{\mathbf s(w)-\mathbf s(0)}{w}$ where all we need to do is compute the new point $\mathbf s(w)$ with weights $w$ and $1-w$ along the geodesic connecting $\mathbf x_1$ and $\mathbf x_2$. The default implementation of the function does this, by evaluating the quotient for a small but finite weight $w$. In practice, however, it is almost always possible to explicitly compute the direction vector, i.e., without the need to numerically approximate the limit process, and derived classes should do so.

    Definition at line 285 of file manifold.h.

    Member Typedef Documentation

Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. The geodesic is the shortest line between these two points, where "shortest" is defined via a metric specific to a particular implementation of this class in a derived class. For example, in the case of a FlatManifold, the shortest line between two points is just the straight line, and in this case the tangent vector is just the difference $\mathbf d=\mathbf x_2-\mathbf x_1$. On the other hand, for a manifold that describes a surface embedded in a higher dimensional space (e.g., the surface of a sphere), the tangent vector is tangential to the surface, and consequently may point in a different direction than the straight line that connects the two points.

While tangent vectors are often normalized to unit length, the vectors returned by this function are normalized as described in the introduction of this class. Specifically, if $\mathbf s(t)$ traces out the geodesic between the two points where $\mathbf x_1 = \mathbf s(0)$ and $\mathbf x_2 = \mathbf s(1)$, then the returned vector must equal $\mathbf s'(0)$. In other words, the norm of the returned vector also encodes, in some sense, the length of the geodesic because a curve $\mathbf s(t)$ must move "faster" if the two points it connects between arguments $t=0$ and $t=1$ are farther apart.

The default implementation of this function approximates $\mathbf s'(0) \approx \frac{\mathbf s(\epsilon)-\mathbf x_1}{\epsilon}$ for a small value of $\epsilon$, and the evaluation of $\mathbf s(\epsilon)$ is done by calling get_new_point(). If possible, derived classes should override this function by an implementation of the exact derivative.
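For instance, a flat manifold can supply the exact derivative instead of the finite-difference default (a sketch; the straight-line geodesic gives $\mathbf s(t) = (1-t)\,\mathbf x_1 + t\,\mathbf x_2$):

#include <deal.II/base/point.h>
#include <deal.II/base/tensor.h>

using namespace dealii;

template <int spacedim>
Tensor<1, spacedim> exact_flat_tangent(const Point<spacedim> &x1,
                                       const Point<spacedim> &x2)
{
  // s(t) = (1-t) x1 + t x2, hence s'(0) = x2 - x1: the plain difference
  // vector already carries the correct (non-unit) normalization.
  return x2 - x1;
}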

/usr/share/doc/packages/dealii/doxygen/deal.II/classMapping.html differs (HTML document, ASCII text, with very long lines)

class Mapping< dim, spacedim >

    Abstract base class for mapping classes.

    This class declares the interface for the functionality to describe mappings from the reference (unit) cell to a cell in real space, as well as for filling the information necessary to use the FEValues, FEFaceValues, and FESubfaceValues classes. Concrete implementations of these interfaces are provided in derived classes.

    Mathematics of the mapping

The mapping is a transformation $\mathbf x = \mathbf F_K(\hat{\mathbf x})$ which maps points $\hat{\mathbf x}$ in the reference cell $[0,1]^\text{dim}$ to points $\mathbf x$ in the actual grid cell $K\subset{\mathbb R}^\text{spacedim}$. Many of the applications of such mappings require the Jacobian of this mapping, $J(\hat{\mathbf x}) = \hat\nabla {\mathbf F}_K(\hat{\mathbf x})$. For instance, if dim=spacedim=2, we have

\[
J(\hat{\mathbf x}) = \left(\begin{matrix}
\frac{\partial x}{\partial \hat x} & \frac{\partial x}{\partial \hat y}
\\
\frac{\partial y}{\partial \hat x} & \frac{\partial y}{\partial \hat y}
\end{matrix}\right).
\]

    Compute information about the mapping from the reference cell to the real cell indicated by the first argument to this function. Derived classes will have to implement this function based on the kind of mapping they represent. It is called by FEValues::reinit().

Conceptually, this function represents the application of the mapping $\mathbf x=\mathbf F_K(\hat{\mathbf x})$ from reference coordinates $\hat{\mathbf x}\in [0,1]^\text{dim}$ to real space coordinates $\mathbf x$ for a given cell $K$. Its purpose is to compute the following kinds of data:

• Data that results from the application of the mapping itself, e.g., computing the location $\mathbf x_q = \mathbf F_K(\hat{\mathbf x}_q)$ of quadrature points on the real cell, and that is directly useful to users of FEValues, for example during assembly.
• Data that is necessary for finite element implementations to compute their shape functions on the real cell. To this end, the FEValues::reinit() function calls FiniteElement::fill_fe_values() after the current function, and the output of this function serves as input to FiniteElement::fill_fe_values(). Examples of information that needs to be computed here for use by the finite element classes is the Jacobian of the mapping, $\hat\nabla \mathbf F_K(\hat{\mathbf x})$, or its inverse, for example to transform the gradients of shape functions on the reference cell to the gradients of shape functions on the real cell.

    The information computed by this function is used to fill the various member variables of the output argument of this function. Which of the member variables of that structure should be filled is determined by the update flags stored in the Mapping::InternalDataBase object passed to this function.

    An extensive discussion of the interaction between this function and FEValues can be found in the How Mapping, FiniteElement, and FEValues work together documentation topic.

The mapping kinds currently implemented by derived classes are:

• mapping_contravariant: maps a vector field on the reference cell to the physical cell through the Jacobian:

\[
\mathbf u(\mathbf x) = J(\hat{\mathbf x})\hat{\mathbf u}(\hat{\mathbf x}).
\]

In physics, this is usually referred to as the contravariant transformation. Mathematically, it is the push forward of a vector field.

• mapping_covariant: maps a field of one-forms on the reference cell to a field of one-forms on the physical cell. (Theoretically this would refer to a DerivativeForm<1,dim,1> but we canonically identify this type with a Tensor<1,dim>). Mathematically, it is the pull back of the differential form

\[
\mathbf u(\mathbf x) = J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}\hat{\mathbf u}(\hat{\mathbf x}).
\]

Gradients of scalar differentiable functions are transformed this way.

In the case when dim=spacedim the previous formula reduces to

\[
\mathbf u(\mathbf x) = J(\hat{\mathbf x})^{-T}\hat{\mathbf u}(\hat{\mathbf x})
\]

because we assume that the mapping $\mathbf F_K$ is always invertible, and consequently its Jacobian $J$ is an invertible matrix.

• mapping_piola: A field of dim-1-forms on the reference cell is also represented by a vector field, but again transforms differently, namely by the Piola transform

\[
\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x}).
\]

Parameters
x1: The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.

Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf T} = \hat \nabla \hat{\mathbf u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

• mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

\[
\mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
\]

Jacobians of spacedim-vector valued differentiable functions are transformed this way.

In the case when dim=spacedim the previous formula reduces to

\[
\mathbf T(\mathbf x) = \hat{\mathbf u}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

• mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x) = J \hat{\mathbf u}$ so that

\[
\mathbf T(\mathbf x) = J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

• mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) = J^{-T} \hat{\mathbf u}$ so that

\[
\mathbf T(\mathbf x) = J(\hat{\mathbf x})^{-T} \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

• mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

\[
\mathbf T(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf T}(\hat{\mathbf x}) J(\hat{\mathbf x})^{-1}.
\]

The mapping kinds currently implemented by derived classes are:

• mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

\[
\mathbf T_{ijk}(\mathbf x) = \hat{\mathbf T}_{iJK}(\hat{\mathbf x}) J_{jJ}^{\dagger} J_{kK}^{\dagger},
\]

where

\[
J^{\dagger} = J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T} J(\hat{\mathbf x}))^{-1}.
\]

Hessians of spacedim-vector valued differentiable functions are transformed this way (after subtraction of the product of the derivative with the Jacobian gradient).

In the case when dim=spacedim the previous formula reduces to

\[
J^{\dagger} = J^{-1}.
\]

Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk} \mathbf u_i$ and $\hat{\mathbf T}_{IJK} = \hat D^2_{JK} \hat{\mathbf u}_I$, with $\mathbf u_i$ a vector field.

The mapping kinds currently implemented by derived classes are:

• mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI} \hat{\mathbf u}_I$ so that

\[
\mathbf T_{ijk}(\mathbf x) = J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x}) J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
\]

• mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) = J_{iI}^{-T} \hat{\mathbf u}_I$ so that

\[
\mathbf T_{ijk}(\mathbf x) = J_{iI}(\hat{\mathbf x})^{-1} \hat{\mathbf T}_{IJK}(\hat{\mathbf x}) J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
\]

• mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x})$ so that

\[
\mathbf T_{ijk}(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x}) J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
\]
/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingC1.html differs (HTML document, ASCII text, with very long lines)
/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingCartesian.html differs (HTML document, ASCII text, with very long lines)

      Detailed Description

      template<int dim, int spacedim = dim>
      class MappingCartesian< dim, spacedim >

      A class providing a mapping from the reference cell to cells that are axiparallel, i.e., that have the shape of rectangles (in 2d) or boxes (in 3d) with edges parallel to the coordinate directions. The class therefore provides functionality that is equivalent to what, for example, MappingQ would provide for such cells. However, knowledge of the shape of cells allows this class to be substantially more efficient.

Specifically, the mapping is meant for cells for which the mapping from the reference to the real cell is a scaling along the coordinate directions: The transformation from reference coordinates $\hat{\mathbf x}$ to real coordinates $\mathbf x$ on each cell is of the form

\begin{align*}
{\mathbf x}(\hat{\mathbf x})
=
\begin{pmatrix}
h_x & 0
\\
0 & h_y
\end{pmatrix}
\hat{\mathbf x}
+ {\mathbf v}_0
\end{align*}

in 2d, and

\begin{align*}
{\mathbf x}(\hat{\mathbf x})
=
\begin{pmatrix}
h_x & 0 & 0
\\
0 & h_y & 0
\\
0 & 0 & h_z
\end{pmatrix}
\hat{\mathbf x}
+ {\mathbf v}_0
\end{align*}

in 3d, where ${\mathbf v}_0$ is the bottom left vertex and $h_x,h_y,h_z$ are the extents of the cell along the axes.

      The class is intended for efficiency, and it does not do a whole lot of error checking. If you apply this mapping to a cell that does not conform to the requirements above, you will get strange results.

      Definition at line 78 of file mapping_cartesian.h.
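A minimal usage sketch (standard deal.II setup assumed): pairing MappingCartesian with FEValues on a mesh that satisfies the axiparallel requirement:

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/mapping_cartesian.h>

using namespace dealii;

void setup()
{
  const MappingCartesian<2> mapping; // valid only for axis-parallel rectangles
  const FE_Q<2>             fe(1);
  const QGauss<2>           quadrature(2);

  // Little error checking is done: applying this mapping to a
  // non-axiparallel cell silently produces wrong results.
  FEValues<2> fe_values(mapping, fe, quadrature,
                        update_values | update_JxW_values);
}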

/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFE.html differs (HTML document, ASCII text, with very long lines)

/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingFEField.html differs (HTML document, ASCII text, with very long lines)
@@ -699,37 +699,37 @@
      The mapping kinds currently implemented by derived classes are listed
      below; a short code sketch of these transforms follows the list.

      • mapping_contravariant: maps a vector field on the reference cell to
        the physical cell through the Jacobian:

        \[
        \mathbf u(\mathbf x) = J(\hat{\mathbf x})\hat{\mathbf u}(\hat{\mathbf x}).
        \]

        In physics, this is usually referred to as the contravariant
        transformation. Mathematically, it is the push forward of a vector
        field.

      • mapping_covariant: maps a field of one-forms on the reference cell
        to a field of one-forms on the physical cell. (Theoretically this
        would refer to a DerivativeForm<1,dim,1> but we canonically identify
        this type with a Tensor<1,dim>). Mathematically, it is the pull back
        of the differential form

        \[
        \mathbf u(\mathbf x) = J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T}
        J(\hat{\mathbf x}))^{-1}\hat{\mathbf u}(\hat{\mathbf x}).
        \]

        Gradients of scalar differentiable functions are transformed this
        way.

        In the case when dim=spacedim the previous formula reduces to

        \[
        \mathbf u(\mathbf x) = J(\hat{\mathbf x})^{-T}\hat{\mathbf u}(\hat{\mathbf x})
        \]

        because we assume that the mapping $\mathbf F_K$ is always
        invertible, and consequently its Jacobian $J$ is an invertible
        matrix.

      • mapping_piola: A field of dim-1-forms on the reference cell is also
        represented by a vector field, but again transforms differently,
        namely by the Piola transform

        \[
        \mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})}
        J(\hat{\mathbf x}) \hat{\mathbf u}(\hat{\mathbf x}).
        \]
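
      As a minimal sketch of the three transforms above for the square case
      dim == spacedim (an illustration, not the library's transform()
      machinery; the function name is hypothetical), using deal.II's tensor
      algebra:

      ```cpp
      #include <deal.II/base/tensor.h>

      using namespace dealii;

      template <int dim>
      void transform_vector_example(const Tensor<2, dim> &J,     // Jacobian at x_hat
                                    const Tensor<1, dim> &u_hat) // reference-cell field
      {
        // mapping_contravariant: u = J u_hat (push forward of a vector field)
        const Tensor<1, dim> u_contravariant = J * u_hat;

        // mapping_covariant, square J: u = J^{-T} u_hat (pull back of a one-form)
        const Tensor<1, dim> u_covariant = transpose(invert(J)) * u_hat;

        // mapping_piola: u = (1 / det J) J u_hat
        const Tensor<1, dim> u_piola = (1.0 / determinant(J)) * J * u_hat;

        (void)u_contravariant;
        (void)u_covariant;
        (void)u_piola;
      }
      ```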

      @@ -783,21 +783,21 @@
      Transform a field of differential forms from the reference cell to the
      physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$
      and $\hat{\mathbf T} = \hat \nabla \hat{\mathbf u}$, with
      $\mathbf u$ a vector field. The mapping kinds currently implemented by
      derived classes are:

      • mapping_covariant: maps a field of forms on the reference cell to a
        field of forms on the physical cell. Mathematically, it is the pull
        back of the differential form

        \[
        \mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x})
                               J(\hat{\mathbf x})(J(\hat{\mathbf x})^{T}
        J(\hat{\mathbf x}))^{-1}.
        \]

        Jacobians of spacedim-vector valued differentiable functions are
        transformed this way.

        In the case when dim=spacedim the previous formula reduces to

        \[
        \mathbf T(\mathbf x) = \hat{\mathbf T}(\hat{\mathbf x})
                               J(\hat{\mathbf x})^{-1}.
        \]

        @@ -854,35 +854,35 @@

      Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

      • -mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x)
-= J \hat{\mathbf  u}$ so that

        -\[
+<code>mapping_contravariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x)
+= J \hat{\mathbf  u}$ so that

        +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1370.png"/>

      • -mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) =
-J^{-T} \hat{\mathbf  u}$ so that

        -\[
+<code>mapping_covariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x) =
+J^{-T} \hat{\mathbf  u}$ so that

        +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x})^{-T} \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1372.png"/>

      • -mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) =
+<code>mapping_piola_gradient:</code> it assumes   <picture><source srcset=$\mathbf u(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf
-u}(\hat{\mathbf x})$ so that

        -\[
+u}(\hat{\mathbf x})$ so that

        +\[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\] +\]" src="form_1374.png"/>
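
      As a sanity check of the mapping_covariant_gradient formula, one can
      differentiate the assumed relation $\mathbf u = J^{-T} \hat{\mathbf u}$
      under the simplifying assumption of an affine mapping (constant $J$);
      for general mappings, the terms involving derivatives of $J$ are
      accounted for separately:

      \[
      T_{ij}(\mathbf x)
        = \frac{\partial u_i}{\partial x_j}
        = \left(J^{-T}\right)_{iI}
          \frac{\partial \hat u_I}{\partial \hat x_J}
          \frac{\partial \hat x_J}{\partial x_j}
        = \left(J^{-T}\right)_{iI} \hat T_{IJ} \left(J^{-1}\right)_{Jj},
      \qquad \text{i.e.} \qquad
      \mathbf T = J^{-T} \hat{\mathbf T} J^{-1},
      \]

      which is exactly the stated transformation.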

      @@ -941,21 +941,21 @@

      The mapping kinds currently implemented by derived classes are:

      • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

        -\[
+<picture><source srcset=\[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf  T}_{iJK}(\hat{\mathbf  x})
-J_{jJ}^{\dagger} J_{kK}^{\dagger}\] +J_{jJ}^{\dagger} J_{kK}^{\dagger}\]" src="form_1375.png"/>

        ,

        where

        -\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
+<picture><source srcset=\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1376.png"/>

      Hessians of spacedim-vector valued differentiable functions are transformed this way (After subtraction of the product of the derivative with the Jacobian gradient).

      In the case when dim=spacedim the previous formula reduces to

      -\[J^{\dagger} = J^{-1}\] +\[J^{\dagger} = J^{-1}\]

      Parameters
      @@ -1007,40 +1007,40 @@
      Transform a field of 3-differential forms from the reference cell to
      the physical cell. It is useful to think of $\mathbf{T}_{ijk} =
      D^2_{jk} \mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK}
      \mathbf{\hat u}_I$, with $\mathbf u_i$ a vector field.

      The mapping kinds currently implemented by derived classes are:

      • mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x)
        = J_{iI} \hat{\mathbf u}_I$ so that

        \[
        \mathbf T_{ijk}(\mathbf x) =
        J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
        J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
        \]

      • mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) =
        J_{iI}^{-T} \hat{\mathbf u}_I$ so that

        \[
        \mathbf T_{ijk}(\mathbf x) =
        J_{iI}(\hat{\mathbf x})^{-1} \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
        J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
        \]

      • mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) =
        \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x})
        \hat{\mathbf u}_I(\hat{\mathbf x})$ so that

        \[
        \mathbf T_{ijk}(\mathbf x) =
        \frac{1}{\text{det}\;J(\hat{\mathbf x})}
        J_{iI}(\hat{\mathbf x}) \hat{\mathbf T}_{IJK}(\hat{\mathbf x})
        J_{jJ}(\hat{\mathbf x})^{-1} J_{kK}(\hat{\mathbf x})^{-1}.
        \]
/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html	2024-11-15 06:44:18.623575059 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingManifold.html	2024-11-15 06:44:18.623575059 +0000
/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html	2024-11-15 06:44:18.687575631 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ.html	2024-11-15 06:44:18.687575631 +0000
@@ -266,7 +266,7 @@
        Detailed Description

        template<int dim, int spacedim = dim>
        class MappingQ< dim, spacedim >

        This class implements the functionality for polynomial mappings $Q_p$ of polynomial degree $p$ that will be used on all cells of the mesh. In order to get a genuine higher-order mapping for all cells, it is important to provide information about how interior edges and faces of the mesh should be curved. This is typically done by associating a Manifold with interior cells and edges. A simple example of this is discussed in the "Results" section of step-6; a full discussion of manifolds is provided in step-53. If manifolds are only attached to the boundaries of a domain, the current class with higher polynomial degrees will provide the same information as a mere MappingQ1 object. If you are working on meshes that describe a (curved) manifold embedded in higher space dimensions, i.e., if dim!=spacedim, then every cell is at the boundary of the domain and you will likely already have attached a manifold object to all cells that can then also be used by the mapping classes for higher order mappings.

        Behavior along curved boundaries and with different manifolds

        For a number of applications, one only knows a manifold description of a surface but not the interior of the computational domain. In such a case, a FlatManifold object will be assigned to the interior entities that describes a usual planar coordinate system where the additional points for the higher order mapping are placed exactly according to a bi-/trilinear mapping. When combined with a non-flat manifold on the boundary, for example a circle bulging into the interior of a square cell, the two manifold descriptions are in general incompatible. For example, a FlatManifold defined solely through the cell's vertices would put an interior point located at some small distance epsilon away from the boundary along a straight line and thus in general outside the concave part of a circle. If the polynomial degree of MappingQ is sufficiently high, the transformation from the reference cell to such a cell would in general contain inverted regions close to the boundary.

        In order to avoid this situation, this class applies an algorithm for making this transition smooth using a so-called transfinite interpolation that is essentially a linear blend between the descriptions along the surrounding entities. In the algorithm that computes additional points, the compute_mapping_support_points() method, all the entities of the cells are passed through hierarchically, starting from the lines to the quads and finally hexes. Points on objects higher up in the hierarchy are obtained from the manifold associated with that object, taking into account all the points previously computed by the manifolds associated with the lower-dimensional objects, not just the vertices. If only a line is assigned a curved boundary but the adjacent quad is on a flat manifold, the flat manifold on the quad will take the points on the deformed line into account when interpolating the position of the additional points inside the quad and thus always result in a well-defined transformation.
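
        To make this concrete, here is a minimal sketch, assuming a 2d disc
        mesh, of attaching a curved boundary manifold and creating a cubic
        MappingQ. Note that recent deal.II versions of
        GridGenerator::hyper_ball already attach a SphericalManifold for you;
        the explicit set_manifold() call is repeated here only for clarity.

        ```cpp
        #include <deal.II/grid/tria.h>
        #include <deal.II/grid/grid_generator.h>
        #include <deal.II/grid/manifold_lib.h>
        #include <deal.II/fe/mapping_q.h>

        using namespace dealii;

        int main()
        {
          // Disc whose curved boundary follows a SphericalManifold; interior
          // cells keep a flat description, so MappingQ's blending of boundary
          // curvature into the interior matters here.
          Triangulation<2> triangulation;
          GridGenerator::hyper_ball(triangulation);
          triangulation.set_all_manifold_ids_on_boundary(0);
          triangulation.set_manifold(0, SphericalManifold<2>());

          // A cubic polynomial mapping; on cells away from curved manifolds
          // it provides the same information as a MappingQ1 object.
          const MappingQ<2> mapping(3);
          (void)mapping;
        }
        ```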

/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html	2024-11-15 06:44:18.759576274 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1.html	2024-11-15 06:44:18.759576274 +0000
/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html	2024-11-15 06:44:18.823576845 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ1Eulerian.html	2024-11-15 06:44:18.823576845 +0000
/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html	2024-11-15 06:44:18.895577488 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQCache.html	2024-11-15 06:44:18.895577488 +0000
@@ -517,7 +517,7 @@
        const std::function< std::vector< Point< spacedim > >(const typename Triangulation< dim, spacedim >::cell_iterator &)> &compute_points_on_cell

        Initialize the data cache by letting the function given as an argument provide the mapping support points for all cells (on all levels) of the given triangulation. The function must return a vector of Point<spacedim> whose length is the same as the size of the polynomial space, $(p+1)^\text{dim}$, where $p$ is the polynomial degree of the mapping, and it must be in the order the mapping or FE_Q sort their points, i.e., all $2^\text{dim}$ vertex points first, then the points on the lines, quads, and hexes according to the usual hierarchical numbering. No attempt is made to validate these points internally, except for the number of given points. A minimal sketch of this overload follows below.

        Note
        If multiple threads are enabled, this function will run in parallel, invoking the function passed in several times. Thus, in case MultithreadInfo::n_threads()>1, the user code must make sure that the function, typically a lambda, does not write into data shared with other threads.
        The cache is invalidated upon the signal Triangulation::Signals::any_change of the underlying triangulation.
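
        A minimal sketch of the initialize() overload documented above,
        assuming a degree-1 cache on a refined unit square; the rigid shift
        of every support point by 0.1 is an arbitrary illustration of
        returning $(p+1)^\text{dim} = 2^\text{dim}$ points per cell in the
        required order (vertices first).

        ```cpp
        #include <deal.II/base/geometry_info.h>
        #include <deal.II/base/point.h>
        #include <deal.II/grid/tria.h>
        #include <deal.II/grid/grid_generator.h>
        #include <deal.II/fe/mapping_q_cache.h>

        #include <vector>

        using namespace dealii;

        int main()
        {
          Triangulation<2> triangulation;
          GridGenerator::hyper_cube(triangulation);
          triangulation.refine_global(2);

          // Degree-1 cache: the callback returns the 2^dim vertex points of
          // each cell, here translated by (0.1, 0), which yields a mapping
          // describing a rigidly shifted geometry.
          MappingQCache<2> mapping_cache(/* polynomial degree p = */ 1);
          mapping_cache.initialize(
            triangulation,
            [](const Triangulation<2>::cell_iterator &cell) {
              std::vector<Point<2>> points(GeometryInfo<2>::vertices_per_cell);
              for (unsigned int v = 0; v < GeometryInfo<2>::vertices_per_cell;
                   ++v)
                points[v] = cell->vertex(v) + Point<2>(0.1, 0.0);
              return points;
            });
        }
        ```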
/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html	2024-11-15 06:44:18.963578095 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQEulerian.html	2024-11-15 06:44:18.963578095 +0000
@@ -813,37 +813,37 @@
 <p>The mapping kinds currently implemented by derived classes are: </p><ul>
 <li>
 <p class=mapping_contravariant: maps a vector field on the reference cell to the physical cell through the Jacobian:

        -\[
+<picture><source srcset=\[
 \mathbf u(\mathbf x) = J(\hat{\mathbf  x})\hat{\mathbf  u}(\hat{\mathbf
 x}).
-\] +\]" src="form_1361.png"/>

        In physics, this is usually referred to as the contravariant transformation. Mathematically, it is the push forward of a vector field.

      • mapping_covariant: maps a field of one-forms on the reference cell to a field of one-forms on the physical cell. (Theoretically this would refer to a DerivativeForm<1,dim,1> but we canonically identify this type with a Tensor<1,dim>). Mathematically, it is the pull back of the differential form

        -\[
+<picture><source srcset=\[
 \mathbf u(\mathbf x) = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}\hat{\mathbf u}(\hat{\mathbf  x}).
-\] +\]" src="form_1362.png"/>

        Gradients of scalar differentiable functions are transformed this way.

        In the case when dim=spacedim the previous formula reduces to

        -\[
+<picture><source srcset=\[
 \mathbf u(\mathbf x) = J(\hat{\mathbf  x})^{-T}\hat{\mathbf
 u}(\hat{\mathbf  x})
-\] +\]" src="form_1363.png"/>

        because we assume that the mapping $\mathbf F_K$ is always invertible, and consequently its Jacobian $J$ is an invertible matrix.

      • mapping_piola: A field of dim-1-forms on the reference cell is also represented by a vector field, but again transforms differently, namely by the Piola transform

        -\[
+<picture><source srcset=\[
  \mathbf u(\mathbf x) = \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf x}) \hat{\mathbf  u}(\hat{\mathbf x}).
-\] +\]" src="form_1364.png"/>

      @@ -895,21 +895,21 @@
      -

      Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

        +

        Transform a field of differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T} = \nabla \mathbf u$ and $\hat{\mathbf  T} = \hat \nabla \hat{\mathbf  u}$, with $\mathbf u$ a vector field. The mapping kinds currently implemented by derived classes are:

        • mapping_covariant: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

          -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  T}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\] +\]" src="form_1367.png"/>

          Jacobians of spacedim-vector valued differentiable functions are transformed this way.

          In the case when dim=spacedim the previous formula reduces to

          -\[
+<picture><source srcset=\[
 \mathbf T(\mathbf x) = \hat{\mathbf  u}(\hat{\mathbf  x})
                        J(\hat{\mathbf  x})^{-1}.
-\]
+\]" src="form_1368.png"/>

        @@ -964,35 +964,35 @@

      Transform a tensor field from the reference cell to the physical cell. These tensors are usually the Jacobians in the reference cell of vector fields that have been pulled back from the physical cell. The mapping kinds currently implemented by derived classes are:

      • -mapping_contravariant_gradient: it assumes $\mathbf u(\mathbf x)
-= J \hat{\mathbf  u}$ so that

        -\[
+<code>mapping_contravariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x)
+= J \hat{\mathbf  u}$ so that

        +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\]
+\]" src="form_1370.png"/>

      • -mapping_covariant_gradient: it assumes $\mathbf u(\mathbf x) =
-J^{-T} \hat{\mathbf  u}$ so that

        -\[
+<code>mapping_covariant_gradient:</code> it assumes  <picture><source srcset=$\mathbf u(\mathbf x) =
+J^{-T} \hat{\mathbf  u}$ so that

        +\[
 \mathbf T(\mathbf x) =
 J(\hat{\mathbf  x})^{-T} \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\]
+\]" src="form_1372.png"/>

      • -mapping_piola_gradient: it assumes $\mathbf u(\mathbf x) =
+<code>mapping_piola_gradient:</code> it assumes   <picture><source srcset=$\mathbf u(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J(\hat{\mathbf x}) \hat{\mathbf
-u}(\hat{\mathbf x})$ so that

        -\[
+u}(\hat{\mathbf x})$ so that

        +\[
 \mathbf T(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J(\hat{\mathbf  x}) \hat{\mathbf  T}(\hat{\mathbf  x})
 J(\hat{\mathbf  x})^{-1}.
-\]
+\]" src="form_1374.png"/>
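
      These gradient rules follow from the chain rule. As a sketch for the covariant case (my derivation, assuming an affine mapping so that $J$ is constant), use $\nabla_{\mathbf x} = \nabla_{\hat{\mathbf x}} J^{-1}$ together with $\mathbf u = J^{-T} \hat{\mathbf u}$:

      \[
      \mathbf T(\mathbf x) = \nabla_{\mathbf x} \mathbf u(\mathbf x)
      = \nabla_{\hat{\mathbf x}} \left( J^{-T} \hat{\mathbf u}(\hat{\mathbf x}) \right) J^{-1}
      = J^{-T} \hat{\mathbf T}(\hat{\mathbf x}) J^{-1}.
      \]

      For non-affine mappings an extra term involving the derivative of $J$ appears; compare the remark below about subtracting the product of the derivative with the Jacobian gradient.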

      @@ -1049,21 +1049,21 @@

      The mapping kinds currently implemented by derived classes are:

      • mapping_covariant_gradient: maps a field of forms on the reference cell to a field of forms on the physical cell. Mathematically, it is the pull back of the differential form

        -\[
+<picture><source srcset=\[
 \mathbf T_{ijk}(\mathbf x) = \hat{\mathbf  T}_{iJK}(\hat{\mathbf  x})
-J_{jJ}^{\dagger} J_{kK}^{\dagger}\]
+J_{jJ}^{\dagger} J_{kK}^{\dagger}\]" src="form_1375.png"/>

        ,

        where

        -\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
+<picture><source srcset=\[ J^{\dagger} = J(\hat{\mathbf  x})(J(\hat{\mathbf  x})^{T}
 J(\hat{\mathbf  x}))^{-1}.
-\]
+\]" src="form_1376.png"/>

      Hessians of spacedim-vector valued differentiable functions are transformed this way (After subtraction of the product of the derivative with the Jacobian gradient).

      In the case when dim=spacedim the previous formula reduces to

-\[J^{\dagger} = J^{-1}\]
+\[J^{\dagger} = J^{-1}\]

      Parameters
      @@ -1113,40 +1113,40 @@
      -

      Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
-\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
-u}_I$, with $\mathbf u_i$ a vector field.

      +

      Transform a field of 3-differential forms from the reference cell to the physical cell. It is useful to think of $\mathbf{T}_{ijk} = D^2_{jk}
+\mathbf u_i$ and $\mathbf{\hat T}_{IJK} = \hat D^2_{JK} \mathbf{\hat
+u}_I$, with $\mathbf u_i$ a vector field.

      The mapping kinds currently implemented by derived classes are:

      • -mapping_contravariant_hessian: it assumes $\mathbf u_i(\mathbf x)
-= J_{iI} \hat{\mathbf  u}_I$ so that

        -\[
+<code>mapping_contravariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x)
+= J_{iI} \hat{\mathbf  u}_I$ so that

        +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\]
+\]" src="form_1382.png"/>

      • -mapping_covariant_hessian: it assumes $\mathbf u_i(\mathbf x) =
-J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

        -\[
+<code>mapping_covariant_hessian:</code> it assumes  <picture><source srcset=$\mathbf u_i(\mathbf x) =
+J_{iI}^{-T} \hat{\mathbf  u}_I$ so that

        +\[
 \mathbf T_{ijk}(\mathbf x) =
 J_{iI}(\hat{\mathbf  x})^{-1} \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
 J_{jJ}(\hat{\mathbf  x})^{-1} J_{kK}(\hat{\mathbf  x})^{-1}.
-\]
+\]" src="form_1384.png"/>

      • -mapping_piola_hessian: it assumes $\mathbf u_i(\mathbf x) =
+<code>mapping_piola_hessian:</code> it assumes   <picture><source srcset=$\mathbf u_i(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})} J_{iI}(\hat{\mathbf x})
-\hat{\mathbf u}(\hat{\mathbf x})$ so that

        -\[
+\hat{\mathbf u}(\hat{\mathbf x})$ so that

        +\[
 \mathbf T_{ijk}(\mathbf x) =
 \frac{1}{\text{det}\;J(\hat{\mathbf x})}
 J_{iI}(\hat{\mathbf  x}) \hat{\mathbf  T}_{IJK}(\hat{\mathbf  x})
/usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html	2024-11-15 06:44:18.991578346 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classMappingQ_1_1InternalData.html	2024-11-15 06:44:18.991578346 +0000
@@ -387,7 +387,7 @@

        Number of shape functions. If this is a Q1 mapping, then it is simply the number of vertices per cell. However, since also derived classes use this class (e.g. the Mapping_Q() class), the number of shape functions may also be different.

        -

        In general, it is $(p+1)^\text{dim}$, where $p$ is the polynomial degree of the mapping.

        +

        In general, it is $(p+1)^\text{dim}$, where $p$ is the polynomial degree of the mapping.

        Definition at line 372 of file mapping_q.h.
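
        As a worked instance of the $(p+1)^\text{dim}$ count (my sketch, not code from mapping_q.h): a degree $p = 2$ mapping in 3d has $3^3 = 27$ shape functions.

        #include <deal.II/base/utilities.h>

        // (p+1)^dim mapping shape functions; p = 2, dim = 3 gives 27.
        const unsigned int p                 = 2;
        const unsigned int dim               = 3;
        const unsigned int n_shape_functions = dealii::Utilities::pow(p + 1, dim);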

/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html	2024-11-15 06:44:19.099579310 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteFaceQuadratureGenerator.html	2024-11-15 06:44:19.099579310 +0000
@@ -401,7 +401,7 @@
      -

      Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      +

      Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      Definition at line 2000 of file quadrature_generator.cc.

      @@ -431,7 +431,7 @@
      -

      Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      +

      Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      Definition at line 2008 of file quadrature_generator.cc.

      @@ -461,8 +461,8 @@
      -

      Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      -
      Note
      The normal at the quadrature points will be parallel to $\nabla \psi$.
      +

      Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

      +
      Note
      The normal at the quadrature points will be parallel to $\nabla \psi$.

      Definition at line 2017 of file quadrature_generator.cc.

/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html	2024-11-15 06:44:19.119579489 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1DiscreteQuadratureGenerator.html	2024-11-15 06:44:19.119579489 +0000
@@ -388,7 +388,7 @@
      -

      Return the quadrature rule for the region $\{x \in B : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

      +

      Return the quadrature rule for the region $\{x \in B : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

      Definition at line 1871 of file quadrature_generator.cc.

      @@ -418,7 +418,7 @@
      -

      Return the quadrature rule for the region $\{x \in B : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

      +

      Return the quadrature rule for the region $\{x \in B : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

      Definition at line 1880 of file quadrature_generator.cc.

      @@ -448,8 +448,8 @@
      -

      Return the quadrature rule for the region $\{x \in B : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

      -
      Note
      The normal at the quadrature points will be parallel to $\nabla \psi$.
      +

      Return the quadrature rule for the region $\{x \in B : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $B$ is BoundingBox passed to generate().

      +
      Note
      The normal at the quadrature points will be parallel to $\nabla \psi$.

      Definition at line 1889 of file quadrature_generator.cc.
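
      A minimal usage sketch of the QuadratureGenerator machinery behind these getters; the level set, box, and 1d base rule below are illustrative choices of mine, not taken from the diffed pages:

      #include <deal.II/base/bounding_box.h>
      #include <deal.II/base/function_signed_distance.h>
      #include <deal.II/base/quadrature_lib.h>
      #include <deal.II/hp/q_collection.h>
      #include <deal.II/non_matching/quadrature_generator.h>

      #include <utility>

      using namespace dealii;

      void immersed_quadrature_sketch()
      {
        constexpr int dim = 2;

        // psi(x) = |x - c| - r: a circle of radius 0.4 centered at (0.5, 0.5).
        const Functions::SignedDistance::Sphere<dim> level_set(Point<dim>(0.5, 0.5), 0.4);

        // The box B over which the immersed quadrature rules are generated.
        const BoundingBox<dim> box(std::make_pair(Point<dim>(0., 0.), Point<dim>(1., 1.)));

        // The immersed rules are built from a 1d base rule.
        const hp::QCollection<1> q_collection_1d(QGauss<1>(2));
        NonMatching::QuadratureGenerator<dim> generator(q_collection_1d);
        generator.generate(level_set, box);

        // Rules for {x in B : psi(x) < 0}, {x in B : psi(x) > 0}, {x in B : psi(x) = 0}.
        const Quadrature<dim> &inside  = generator.get_inside_quadrature();
        const Quadrature<dim> &outside = generator.get_outside_quadrature();
        const NonMatching::ImmersedSurfaceQuadrature<dim> &surface =
          generator.get_surface_quadrature();
        (void)inside;
        (void)outside;
        (void)surface;
      }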

/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html	2024-11-15 06:44:19.203580240 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEImmersedSurfaceValues.html	2024-11-15 06:44:19.203580240 +0000
@@ -520,7 +520,7 @@
const unsigned int quadrature_point

      Returns the surface gradient of the shape function with index function_no at the quadrature point with index quadrature_point.

      -

      The surface gradient is defined as the projection of the gradient to the tangent plane of the surface: $ \nabla u - (n \cdot \nabla u) n $, where $n$ is the unit normal to the surface.

      +

      The surface gradient is defined as the projection of the gradient to the tangent plane of the surface: $ \nabla u - (n \cdot \nabla u) n $, where $n$ is the unit normal to the surface.

      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients | update_normal_vectors flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 137 of file fe_immersed_values.cc.
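
      The projection $ \nabla u - (n \cdot \nabla u) n $ is plain tensor algebra; a self-contained sketch with deal.II tensors (my example, not library code):

      #include <deal.II/base/tensor.h>

      using namespace dealii;

      // Project a gradient onto the tangent plane of a surface with unit
      // normal n: grad - (n . grad) n, matching the formula quoted above.
      template <int dim>
      Tensor<1, dim> surface_gradient(const Tensor<1, dim> &grad,
                                      const Tensor<1, dim> &n)
      {
        return grad - (grad * n) * n; // operator* contracts two rank-1 tensors
      }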

      @@ -695,7 +695,7 @@

      If the shape function is vector-valued, then this returns the only non-zero component. If the shape function has more than one non-zero component (i.e. it is not primitive), then throw an exception of type ExcShapeFunctionNotPrimitive. In that case, use the shape_value_component() function.

      Parameters
      - +
      iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
      iNumber of the shape function $\varphi_i$ to be evaluated. Note that this number runs from zero to dofs_per_cell, even in the case of an FEFaceValues or FESubfaceValues object.
      q_pointNumber of the quadrature point at which function is to be evaluated
      @@ -734,7 +734,7 @@

      Compute one vector component of the value of a shape function at a quadrature point. If the finite element is scalar, then only component zero is allowed and the return value equals that of the shape_value() function. If the finite element is vector valued but all shape functions are primitive (i.e. they are non-zero in only one component), then the value returned by shape_value() equals that of this function for exactly one component. This function is therefore only of greater interest if the shape function is not primitive, but then it is necessary since the other function cannot be used.

      Parameters
      - +
      iNumber of the shape function $\varphi_i$ to be evaluated.
      iNumber of the shape function $\varphi_i$ to be evaluated.
      q_pointNumber of the quadrature point at which function is to be evaluated.
      componentvector component to be evaluated.
      @@ -771,7 +771,7 @@

      The same holds for the arguments of this function as for the shape_value() function.

      Parameters
      - +
      iNumber of the shape function $\varphi_i$ to be evaluated.
      iNumber of the shape function $\varphi_i$ to be evaluated.
      q_pointNumber of the quadrature point at which function is to be evaluated.
      @@ -965,11 +965,11 @@
      Parameters
      - +
      [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
      [out]valuesThe values of the function specified by fe_function at the quadrature points of the current cell. The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the values of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the solution vector.
      -
      Postcondition
      values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
      +
      Postcondition
      values[q] will contain the value of the field described by fe_function at the $q$th quadrature point.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 487 of file fe_values_base.cc.
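
      The typical call pattern is as follows (a sketch; dof_handler, fe_values, and solution are assumed to be set up elsewhere, with update_values among the UpdateFlags passed to the FEValues constructor):

      #include <deal.II/dofs/dof_handler.h>
      #include <deal.II/fe/fe_values.h>
      #include <deal.II/lac/vector.h>

      #include <vector>

      using namespace dealii;

      template <int dim>
      void evaluate_solution(const DoFHandler<dim> &dof_handler,
                             FEValues<dim>         &fe_values,
                             const Vector<double>  &solution)
      {
        std::vector<double> values(fe_values.n_quadrature_points);
        for (const auto &cell : dof_handler.active_cell_iterators())
          {
            fe_values.reinit(cell);
            fe_values.get_function_values(solution, values);
            // values[q] now holds the finite element field at quadrature point q.
          }
      }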

      @@ -999,7 +999,7 @@

      This function does the same as the other get_function_values(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      -
      Postcondition
      values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
      +
      Postcondition
      values[q] is a vector of values of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by values[q] equals the number of components of the finite element, i.e. values[q](c) returns the value of the $c$th vector component at the $q$th quadrature point.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_values flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 505 of file fe_values_base.cc.

      @@ -1160,11 +1160,11 @@
      Parameters
      - +
      [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      [out]gradientsThe gradients of the function specified by fe_function at the quadrature points of the current cell. The gradients are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the gradients of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      -
      Postcondition
      gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
      +
      Postcondition
      gradients[q] will contain the gradient of the field described by fe_function at the $q$th quadrature point. gradients[q][d] represents the derivative in coordinate direction $d$ at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1196,7 +1196,7 @@

      This function does the same as the other get_function_gradients(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      -
      Postcondition
      gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      +
      Postcondition
      gradients[q] is a vector of gradients of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by gradients[q] equals the number of components of the finite element, i.e. gradients[q][c] returns the gradient of the $c$th vector component at the $q$th quadrature point. Consequently, gradients[q][c][d] is the derivative in coordinate direction $d$ of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_gradients flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 683 of file fe_values_base.cc.

      @@ -1303,11 +1303,11 @@
      Parameters
      - +
      [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      [out]hessiansThe Hessians of the function specified by fe_function at the quadrature points of the current cell. The Hessians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Hessians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      -
      Postcondition
      hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
      +
      Postcondition
      hessians[q] will contain the Hessian of the field described by fe_function at the $q$th quadrature point. hessians[q][i][j] represents the $(i,j)$th component of the matrix of second derivatives at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1344,7 +1344,7 @@

      This function does the same as the other get_function_hessians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      -
      Postcondition
      hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      +
      Postcondition
      hessians[q] is a vector of Hessians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by hessians[q] equals the number of components of the finite element, i.e. hessians[q][c] returns the Hessian of the $c$th vector component at the $q$th quadrature point. Consequently, hessians[q][c][i][j] is the $(i,j)$th component of the matrix of second derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 786 of file fe_values_base.cc.

      @@ -1451,11 +1451,11 @@
      Parameters
      - +
      [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
      [out]laplaciansThe Laplacians of the function specified by fe_function at the quadrature points of the current cell. The Laplacians are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the Laplacians of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument). This happens to be equal to the type of the elements of the input vector.
      -
      Postcondition
      laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
      +
      Postcondition
      laplacians[q] will contain the Laplacian of the field described by fe_function at the $q$th quadrature point.
      For each component of the output vector, there holds laplacians[q]=trace(hessians[q]), where hessians would be the output of the get_function_hessians() function.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      @@ -1489,7 +1489,7 @@

      This function does the same as the other get_function_laplacians(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      -
      Postcondition
      laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
      +
      Postcondition
      laplacians[q] is a vector of Laplacians of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by laplacians[q] equals the number of components of the finite element, i.e. laplacians[q][c] returns the Laplacian of the $c$th vector component at the $q$th quadrature point.
      For each component of the output vector, there holds laplacians[q][c]=trace(hessians[q][c]), where hessians would be the output of the get_function_hessians() function.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_hessians flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
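
      The componentwise relation laplacians[q] = trace(hessians[q]) quoted above, as a one-line sketch with deal.II tensors (illustrative, not library code):

      #include <deal.II/base/tensor.h>

      using namespace dealii;

      // The Laplacian is the trace of the Hessian returned by
      // get_function_hessians(), i.e. laplacians[q] == trace(hessians[q]).
      template <int dim>
      double laplacian_from_hessian(const Tensor<2, dim> &hessian)
      {
        return trace(hessian);
      }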
      @@ -1636,11 +1636,11 @@
      Parameters
      - +
      [in]fe_functionA vector of values that describes (globally) the finite element function that this function should evaluate at the quadrature points of the current cell.
      [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      [out]third_derivativesThe third derivatives of the function specified by fe_function at the quadrature points of the current cell. The third derivatives are computed in real space (as opposed to on the unit cell). The object is assumed to already have the correct size. The data type stored by this output vector must be what you get when you multiply the third derivatives of shape function times the type used to store the values of the unknowns $U_j$ of your finite element vector $U$ (represented by the fe_function argument).
      -
      Postcondition
      third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
      +
      Postcondition
      third_derivatives[q] will contain the third derivatives of the field described by fe_function at the $q$th quadrature point. third_derivatives[q][i][j][k] represents the $(i,j,k)$th component of the 3rd order tensor of third derivatives at quadrature point $q$.
      Note
      The actual data type of the input vector may be either a Vector<T>, BlockVector<T>, or one of the PETSc or Trilinos vector wrapper classes. It represents a global vector of DoF values associated with the DoFHandler object with which this FEValues object was last initialized.
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.
      @@ -1677,7 +1677,7 @@

      This function does the same as the other get_function_third_derivatives(), but applied to multi-component (vector-valued) elements. The meaning of the arguments is as explained there.

      -
      Postcondition
      third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      +
      Postcondition
      third_derivatives[q] is a vector of third derivatives of the field described by fe_function at the $q$th quadrature point. The size of the vector accessed by third_derivatives[q] equals the number of components of the finite element, i.e. third_derivatives[q][c] returns the third derivative of the $c$th vector component at the $q$th quadrature point. Consequently, third_derivatives[q][c][i][j][k] is the $(i,j,k)$th component of the tensor of third derivatives of the $c$th vector component of the vector field at quadrature point $q$ of the current cell.
      Note
      For this function to work properly, the underlying FEValues, FEFaceValues, or FESubfaceValues object on which you call it must have computed the information you are requesting. To do so, the update_3rd_derivatives flag must be an element of the list of UpdateFlags that you passed to the constructor of this object. See The interplay of UpdateFlags, Mapping, and FiniteElement in FEValues for more information.

      Definition at line 1006 of file fe_values_base.cc.

/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html	2024-11-15 06:44:19.239580561 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEInterfaceValues.html	2024-11-15 06:44:19.239580561 +0000
@@ -173,11 +173,11 @@

      Detailed Description

      template<int dim>
      -class NonMatching::FEInterfaceValues< dim >

      This class is intended to facilitate assembling interface terms on faces in immersed (in the sense of cut) finite element methods. These types of terms occur mainly in cut discontinuous Galerkin methods. This class works analogously to NonMatching::FEValues. The domain is assumed to be described by a level set function, $\psi : \mathbb{R}^{dim} \to \mathbb{R}$, and this class assumes that we want to integrate over two different regions of each face, $F$:

      -\[
+class NonMatching::FEInterfaceValues< dim ></div><p>This class is intended to facilitate assembling interface terms on faces in immersed (in the sense of cut) finite element methods. These types of terms occur mainly in cut discontinuous Galerkin methods. This class works analogously to <a class=NonMatching::FEValues. The domain is assumed to be described by a level set function, $\psi : \mathbb{R}^{dim} \to \mathbb{R}$, and this class assumes that we want to integrate over two different regions of each face, $F$:

      +\[
 N = \{x \in F : \psi(x) < 0 \}, \\
 P = \{x \in F : \psi(x) > 0 \},
-\]
+\]" src="form_2117.png"/>

      which we as before refer to as the "inside" and "outside" regions of the face.

      @@ -210,7 +210,7 @@
      }
      }
      -

      To reduce the amount of work, the reinit() function of this class uses the MeshClassifier passed to the constructor to check how the incoming cell relates to the level set function. The immersed quadrature rules are only generated if the cell is intersected. If the cell is completely inside or outside, it returns a cached FEInterfaceValues object created with a quadrature over the reference cell: $[0, 1]^{dim-1}$.

      +

      To reduce the amount of work, the reinit() function of this class uses the MeshClassifier passed to the constructor to check how the incoming cell relates to the level set function. The immersed quadrature rules are only generated if the cell is intersected. If the cell is completely inside or outside, it returns a cached FEInterfaceValues object created with a quadrature over the reference cell: $[0, 1]^{dim-1}$.

      Definition at line 488 of file fe_values.h.

      Member Typedef Documentation

@@ -364,7 +364,7 @@
- +
@@ -502,7 +502,7 @@
      mapping_collectionCollection of Mappings to be used.
      fe_collectionCollection of FiniteElements to be used.
      q_collectionCollection of Quadrature rules over $[0, 1]^{dim-1}$ that should be used when a face is not intersected and we do not need to generate immersed quadrature rules.
      q_collectionCollection of Quadrature rules over $[0, 1]^{dim-1}$ that should be used when a face is not intersected and we do not need to generate immersed quadrature rules.
      q_collection_1dCollection of 1-dimensional quadrature rules used to generate the immersed quadrature rules. See the QuadratureGenerator class.
      mesh_classifierObject used to determine when the immersed quadrature rules need to be generated.
      region_update_flagsStruct storing UpdateFlags for the inside/outside region of the cell.
    -

    Return an FEInterfaceValues object reinitialized with a quadrature for the inside region of the cell: $\{x \in K : \psi(x) < 0 \}$.

    +

    Return an FEInterfaceValues object reinitialized with a quadrature for the inside region of the cell: $\{x \in K : \psi(x) < 0 \}$.

    Note
    If the quadrature rule over the region is empty, e.g. because the cell is completely located in the outside domain, the returned optional will not contain a value.

    Definition at line 537 of file fe_values.cc.

    @@ -525,7 +525,7 @@
    -

    Return an FEInterfaceValues object reinitialized with a quadrature for the outside region of the cell: $\{x \in K : \psi(x) > 0 \}$.

    +

    Return an FEInterfaceValues object reinitialized with a quadrature for the outside region of the cell: $\{x \in K : \psi(x) > 0 \}$.

    Note
    If the quadrature rule over the region is empty, e.g. because the cell is completely located in the inside domain, the returned optional will not contain a value.

    Definition at line 549 of file fe_values.cc.

    @@ -556,7 +556,7 @@
    -

    Do work common to the constructors. The incoming QCollection should be quadratures integrating over $[0, 1]^{dim-1}$. These will be used on the non-intersected cells.

    +

    Do work common to the constructors. The incoming QCollection should be quadratures integrating over $[0, 1]^{dim-1}$. These will be used on the non-intersected cells.

    Definition at line 397 of file fe_values.cc.

    @@ -847,7 +847,7 @@
    -

    FEInterfaceValues object created with a quadrature rule integrating over the inside region, $\{x \in B : \psi(x) < 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    +

    FEInterfaceValues object created with a quadrature rule integrating over the inside region, $\{x \in B : \psi(x) < 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    Definition at line 702 of file fe_values.h.

    @@ -874,7 +874,7 @@
    -

    FEInterfaceValues object created with a quadrature rule integrating over the outside region, $\{x \in B : \psi(x) > 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    +

    FEInterfaceValues object created with a quadrature rule integrating over the outside region, $\{x \in B : \psi(x) > 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    Definition at line 711 of file fe_values.h.

/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html	2024-11-15 06:44:19.267580811 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FEValues.html	2024-11-15 06:44:19.271580847 +0000
@@ -177,17 +177,17 @@

    Detailed Description

    template<int dim>
    -class NonMatching::FEValues< dim >

    This class is intended to facilitate assembling in immersed (in the sense of cut) finite element methods when the domain is described by a level set function, $\psi : \mathbb{R}^{dim} \to \mathbb{R}$. In this type of method, we typically need to integrate over 3 different regions of each cell, $K$:

    -\[
+class NonMatching::FEValues< dim ></div><p>This class is intended to facilitate assembling in immersed (in the sense of cut) finite element methods when the domain is described by a level set function, <picture><source srcset=$\psi : \mathbb{R}^{dim} \to \mathbb{R}$. In this type of method, we typically need to integrate over 3 different regions of each cell, $K$:

    +\[
 N = \{x \in K : \psi(x) < 0 \}, \\
 P = \{x \in K : \psi(x) > 0 \}, \\
 S = \{x \in K : \psi(x) = 0 \}.
-\]
+\]" src="form_2114.png"/>

    Thus we need quadrature rules for these 3 regions:

    -

    As in the QuadratureGenerator class, we refer to $N$, $P$, and $S$ as the inside, outside, and surface regions. The constructor of this class takes a discrete level set function described by a DoFHandler and a Vector. When the reinit() function is called, the QuadratureGenerator will be called in the background to create these immersed quadrature rules. This class then creates FEValues objects for the inside/outside regions and an FEImmersedSurfaceValues object for the surface region. These objects can then be accessed through one of the functions: get_inside_fe_values(), get_outside_fe_values(), or get_surface_fe_values(). Since a cut between a cell and the domain can be arbitrarily small, the underlying algorithm may generate a quadrature rule with 0 points. This can, for example, happen if the relative size of the cut is similar to the floating-point accuracy. Since the FEValues-like objects are not allowed to contain 0 points, the object that get_inside/outside/surface_fe_values() returns is wrapped in a std::optional. This requires us to check if the returned FEValues-like object contains a value before we use it:

    +

    As in the QuadratureGenerator class, we refer to $N$, $P$, and $S$ as the inside, outside, and surface regions. The constructor of this class takes a discrete level set function described by a DoFHandler and a Vector. When the reinit() function is called, the QuadratureGenerator will be called in the background to create these immersed quadrature rules. This class then creates FEValues objects for the inside/outside regions and an FEImmersedSurfaceValues object for the surface region. These objects can then be accessed through one of the functions: get_inside_fe_values(), get_outside_fe_values(), or get_surface_fe_values(). Since a cut between a cell and the domain can be arbitrarily small, the underlying algorithm may generate a quadrature rule with 0 points. This can, for example, happen if the relative size of the cut is similar to the floating-point accuracy. Since the FEValues-like objects are not allowed to contain 0 points, the object that get_inside/outside/surface_fe_values() returns is wrapped in a std::optional. This requires us to check if the returned FEValues-like object contains a value before we use it:

    for (const auto &cell : dof_handler.active_cell_iterators())
    {
    @@ -208,7 +208,7 @@
    }
    std::optional<::FEValues< dim > > fe_values_inside
    Definition fe_values.h:397
    -

    Of course, it is somewhat expensive to generate the immersed quadrature rules and create FEValues objects with the generated quadratures. To reduce the amount of work, the reinit() function of this class uses the MeshClassifier passed to the constructor to check how the incoming cell relates to the level set function. It only generates the immersed quadrature rules if the cell is intersected. If the cell is completely inside or outside, it returns a cached FEValues object created with a quadrature over the reference cell: $[0, 1]^{dim}$.

    +

    Of course, it is somewhat expensive to generate the immersed quadrature rules and create FEValues objects with the generated quadratures. To reduce the amount of work, the reinit() function of this class uses the MeshClassifier passed to the constructor to check how the incoming cell relates to the level set function. It only generates the immersed quadrature rules if the cell is intersected. If the cell is completely inside or outside, it returns a cached FEValues object created with a quadrature over the reference cell: $[0, 1]^{dim}$.

    Definition at line 143 of file fe_values.h.
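
    The check-the-optional pattern described above looks roughly as follows, along the lines of the code fragment quoted earlier (a sketch; all names are illustrative):

    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/non_matching/fe_values.h>

    #include <optional>

    using namespace dealii;

    template <int dim>
    void integrate_inside_region(NonMatching::FEValues<dim> &non_matching_fe_values,
                                 const DoFHandler<dim>      &dof_handler)
    {
      for (const auto &cell : dof_handler.active_cell_iterators())
        {
          non_matching_fe_values.reinit(cell);

          // Empty if no quadrature points were generated for this region.
          const std::optional<FEValues<dim>> &inside_fe_values =
            non_matching_fe_values.get_inside_fe_values();

          if (inside_fe_values)
            for (const unsigned int q :
                 inside_fe_values->quadrature_point_indices())
              {
                // ...assemble contributions from the inside region at point q...
                (void)q;
              }
        }
    }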

    Member Typedef Documentation

@@ -359,7 +359,7 @@
- +
@@ -464,7 +464,7 @@
    mapping_collectionCollection of Mappings to be used.
    fe_collectionCollection of FiniteElements to be used.
    q_collectionCollection of Quadrature rules over $[0, 1]^{dim}$ that should be used when a cell is not intersected and we do not need to generate immersed quadrature rules.
    q_collectionCollection of Quadrature rules over $[0, 1]^{dim}$ that should be used when a cell is not intersected and we do not need to generate immersed quadrature rules.
    q_collection_1dCollection of 1-dimensional quadrature rules used to generate the immersed quadrature rules. See the QuadratureGenerator class.
    mesh_classifierObject used to determine when the immersed quadrature rules need to be generated.
    region_update_flagsStruct storing UpdateFlags for the inside/outside/surface region of the cell.
    -

    Return an FEValues object reinitialized with a quadrature for the inside region of the cell: $\{x \in K : \psi(x) < 0 \}$.

    +

    Return an FEValues object reinitialized with a quadrature for the inside region of the cell: $\{x \in K : \psi(x) < 0 \}$.

    Note
    If the quadrature rule over the region is empty, e.g. because the cell is completely located in the outside domain, the returned optional will not contain a value.

    Definition at line 306 of file fe_values.cc.

    @@ -487,7 +487,7 @@
    -

    Return an FEValues object reinitialized with a quadrature for the outside region of the cell: $\{x \in K : \psi(x) > 0 \}$.

    +

    Return an FEValues object reinitialized with a quadrature for the outside region of the cell: $\{x \in K : \psi(x) > 0 \}$.

    Note
    If the quadrature rule over the region is empty, e.g. because the cell is completely located in the inside domain, the returned optional will not contain a value.

    Definition at line 318 of file fe_values.cc.

    @@ -510,7 +510,7 @@
    -

    Return an FEValues object reinitialized with a quadrature for the surface region of the cell: $\{x \in K : \psi(x) = 0 \}$.

    +

    Return an FEValues object reinitialized with a quadrature for the surface region of the cell: $\{x \in K : \psi(x) = 0 \}$.

    Note
    If the quadrature rule over the region is empty, e.g. because the cell is not intersected, the returned optional will not contain a value.

    Definition at line 330 of file fe_values.cc.

    @@ -583,7 +583,7 @@
    -

    Do work common to the constructors. The incoming QCollection should be quadratures integrating over $[0, 1]^{dim}$. These will be used on the non-intersected cells.

    +

    Do work common to the constructors. The incoming QCollection should be quadratures integrating over $[0, 1]^{dim}$. These will be used on the non-intersected cells.

    Definition at line 101 of file fe_values.cc.

    @@ -800,7 +800,7 @@
    -

    For each element in the FECollection passed to the constructor, this object contains an FEValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim}$ and UpdateFlags for the inside region. Thus, these optionals should always contain a value.

    +

    For each element in the FECollection passed to the constructor, this object contains an FEValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim}$ and UpdateFlags for the inside region. Thus, these optionals should always contain a value.

    When LocationToLevelSet of the cell is INSIDE (and we do not need to generate an immersed quadrature), we return the FEValues object in this container corresponding to the cell's active_fe_index.

    This container is a std::deque, which is compatible with the FEValues class that does not have a copy-constructor.

    @@ -829,7 +829,7 @@
    -

    For each element in the FECollection passed to the constructor, this object contains an FEValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim}$ and UpdateFlags for the outside region. Thus, these optionals should always contain a value.

    +

    For each element in the FECollection passed to the constructor, this object contains an FEValues object created with a quadrature rule over the full reference cell: $[0, 1]^{dim}$ and UpdateFlags for the outside region. Thus, these optionals should always contain a value.

    When LocationToLevelSet of the cell is OUTSIDE (and we do not need to generate an immersed quadrature), we return the FEValues object in this container corresponding to the cell's active_fe_index.

    This container is a std::deque, which is compatible with the FEValues class that does not have a copy-constructor.

    @@ -858,7 +858,7 @@
    -

    FEValues object created with a quadrature rule integrating over the inside region, $\{x \in B : \psi(x) < 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    +

    FEValues object created with a quadrature rule integrating over the inside region, $\{x \in B : \psi(x) < 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    Definition at line 397 of file fe_values.h.

    @@ -885,7 +885,7 @@
    -

    FEValues object created with a quadrature rule integrating over the outside region, $\{x \in B : \psi(x) > 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    +

    FEValues object created with a quadrature rule integrating over the outside region, $\{x \in B : \psi(x) > 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    Definition at line 406 of file fe_values.h.

    @@ -912,7 +912,7 @@
    -

    FEImmersedSurfaceValues object created with a quadrature rule integrating over the surface region, $\{x \in B : \psi(x) = 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    +

    FEImmersedSurfaceValues object created with a quadrature rule integrating over the surface region, $\{x \in B : \psi(x) = 0 \}$, that was generated in the last call to reinit(..). If the cell in the last call was not intersected or if 0 quadrature points were generated, this optional will not contain a value.

    Definition at line 415 of file fe_values.h.

/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html	2024-11-15 06:44:19.291581025 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator.html	2024-11-15 06:44:19.291581025 +0000
@@ -149,7 +149,7 @@

    Detailed Description

    template<int dim>
    -class NonMatching::FaceQuadratureGenerator< dim >

    This class creates immersed quadrature rules over a face, $F$, of a BoundingBox, when the domain is described by a level set function, $\psi$.

    +class NonMatching::FaceQuadratureGenerator< dim >

    This class creates immersed quadrature rules over a face, $F$, of a BoundingBox, when the domain is described by a level set function, $\psi$.

    In the same way as in the QuadratureGenerator class, this class generates quadrature rules to integrate over 3 different regions of the face, $F \subset \mathbb{R}^{dim}$:

    \[
 N = \{x \in F : \psi(x) < 0 \}, \\
@@ -157,8 +157,8 @@
 S = \{x \in F : \psi(x) = 0 \},
 \]

    -

    which are again referred to as the "inside", $N$, "outside", $P$, and "surface" region, $S$. These types of quadrature rules are in general needed in immersed discontinuous Galerkin methods.

    -

    Under the hood, this class uses the QuadratureGenerator class to build these rules. This is done by restricting the dim-dimensional level set function to the face, thus creating a (dim-1)-dimensional level set function, $\phi$. It then creates the (dim-1)-dimensional quadratures by calling QuadratureGenerator with $\phi$. This means that what holds for the QuadratureGenerator class in general also holds for this class. In particular, if the 1d-quadrature that is used as base contains $n$ points, the number of points will be proportional to $n^{dim-1}$ in the inside/outside quadratures and to $n^{dim-2}$ in the surface quadrature.

    +

    which are again referred to as the "inside", $N$, "outside", $P$, and "surface" region, $S$. These types of quadrature rules are in general needed in immersed discontinuous Galerkin methods.

    +

    Under the hood, this class uses the QuadratureGenerator class to build these rules. This is done by restricting the dim-dimensional level set function to the face, thus creating a (dim-1)-dimensional level set function, $\phi$. It then creates the (dim-1)-dimensional quadratures by calling QuadratureGenerator with $\phi$. This means that what holds for the QuadratureGenerator class in general also holds for this class. In particular, if the 1d-quadrature that is used as base contains $n$ points, the number of points will be proportional to $n^{dim-1}$ in the inside/outside quadratures and to $n^{dim-2}$ in the surface quadrature.

    Definition at line 305 of file quadrature_generator.h.
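
    As a worked instance of the point-count scaling just quoted: with an $n = 3$ point 1d base rule and dim = 3, a face quadrature has on the order of

    \[ n^{dim-1} = 3^2 = 9 \ \text{(inside/outside)}, \qquad n^{dim-2} = 3^1 = 3 \ \text{(surface)} \]

    points, up to the cut-dependent constant of proportionality.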

    Member Typedef Documentation

    @@ -305,7 +305,7 @@
    -

    Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    +

    Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    Definition at line 2000 of file quadrature_generator.cc.

    @@ -327,7 +327,7 @@
    -

    Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    +

    Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    Definition at line 2008 of file quadrature_generator.cc.

    @@ -349,8 +349,8 @@
    -

    Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    -
    Note
    The normal at the quadrature points will be parallel to $\nabla \psi$.
    +

    Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    +
    Note
    The normal at the quadrature points will be parallel to $\nabla \psi$.

    Definition at line 2017 of file quadrature_generator.cc.

/usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html	2024-11-15 06:44:19.307581168 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1FaceQuadratureGenerator_3_011_01_4.html	2024-11-15 06:44:19.307581168 +0000
@@ -282,7 +282,7 @@

Return the quadrature rule for the region $\{x \in F : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    Definition at line 2098 of file quadrature_generator.cc.

    @@ -304,7 +304,7 @@

Return the quadrature rule for the region $\{x \in F : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $F$ is the face of the BoundingBox passed to generate().

    Definition at line 2105 of file quadrature_generator.cc.

    @@ -324,7 +324,7 @@
Return the quadrature rule for the region $\{x \in F : \psi(x) = 0 \}$, where $F$ is the face of the BoundingBox passed to generate().

    Note
    In 1d, this quadrature always contains 0 points.

    Definition at line 2113 of file quadrature_generator.cc.

    @@ -371,7 +371,7 @@
Quadrature for the region $\{x \in F : \psi(x) < 0 \}$. Created in the last call to generate().

    Definition at line 498 of file quadrature_generator.h.

    @@ -396,7 +396,7 @@
Quadrature for the region $\{x \in F : \psi(x) > 0 \}$. Created in the last call to generate().

    Definition at line 505 of file quadrature_generator.h.

    @@ -421,7 +421,7 @@
Quadrature for the region $\{x \in F : \psi(x) = 0 \}$. This quadrature always contains zero points in 1d.

    Definition at line 512 of file quadrature_generator.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html 2024-11-15 06:44:19.327581347 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1ImmersedSurfaceQuadrature.html 2024-11-15 06:44:19.327581347 +0000 @@ -139,41 +139,41 @@

    Detailed Description

    template<int dim, int spacedim = dim>
class NonMatching::ImmersedSurfaceQuadrature< dim, spacedim >

This class defines a quadrature formula to integrate over the intersection between an oriented surface, $\hat{S}$, and a cell or face. The word "immersed" in the class name reflects that the surface may intersect the cell/face in an arbitrary way.

The spacedim template parameter of this class is the dimension that the (spacedim-1)-dimensional surface is embedded in: $\hat{S} \subset \mathbb{R}^{\text{spacedim}}$. The dim parameter describes the dimension of the "object" that the surface intersects. That is, dim = spacedim corresponds to the surface intersecting a cell and dim = spacedim - 1 corresponds to the surface intersecting a face. The quadrature formula is described by a set of quadrature points, $\hat{x}_q \in \mathbb{R}^{\text{dim}}$, weights, $w_q$, and normalized surface normals, $\hat{n}_q \in \mathbb{R}^{\text{spacedim}}$.

Consider first the case dim = spacedim. We typically want to compute integrals in real space. A surface, $S$, intersecting a cell, $K$, in real space can be mapped onto a surface, $\hat{S}$, intersecting the unit cell, $\hat{K}$. Thus an integral over $S\cap K$ in real space can be transformed to an integral over $\hat{S} \cap \hat{K}$ according to

\[
 \int_{S\cap K} f dS =
 \int_{S\cap K} f |d\bar{S}| =
 \int_{\hat{S}\cap\hat{K}} f \circ F_{K} \det(J) |\left( J^{-1} \right)^T d\hat{S}|,
\]

where $F_K$ is the mapping from reference to real space and $J$ is its Jacobian matrix. This transformation is possible since the continuous surface elements are vectors: $d\bar{S}, d\hat{S} \in \mathbb{R}^{spacedim}$, which are parallel to the normals of $S$ and $\hat{S}$. That is, the normal is needed to do the transformation. Thus, in addition to storing points and weights, this quadrature stores also the normalized normal for each quadrature point. This can be viewed as storing a discrete surface element,

\[
 \Delta \hat{S}_q \dealcoloneq w_q \hat{n}_q \approx d\hat{S}(\hat{x}_q),
\]

    for each quadrature point. The surface integral in real space would then be approximated as

\[
 \int_{S\cap K} f dS \approx
 \sum_{q} f \left(F_{K}(\hat{x}_{q}) \right) \det(J_q)
 |\left( J_q^{-1} \right)^T \hat{n}_q| w_q.
\]

When dim = spacedim - 1, this class represents a (spacedim-2)-dimensional integral. That is, if spacedim = 3 we have a line integral immersed in a face. Let $\hat{r}(t)$, $t \in [0,T]$ be an arc-length parameterization of $\hat{F}\cap \hat{S}$, i.e., the part of the surface that intersects the face in reference space. This means that $\bar{r}(t) = F_K(\hat{r}(t))$ is a parameterization of $S\cap F$. The transformation of the line integral now reads

\[
 \int_{S\cap F} f dr
 = \int_{0}^T f(\bar{r}(t)) \left \|\frac{d\bar{r}}{dt} \right \| dt
 = \int_{0}^T f(F_K(\hat{r}(t))) \left \| J \frac{d\hat{r}}{dt} \right \| dt
 \approx \sum_{q} f \left(F_{K}(\hat{x}_{q}) \right) \|J(\hat{x}_q)
 \hat{t}_q \| w_q,
\]

where $\hat{t}_q = \frac{d\hat{r}}{dt}(x_q) $ is the tangent to the curve at $\hat{x}_q$. This tangent can also be computed as $t_q = \hat{n}_q \times \hat{n}_F / \| \hat{n}_q \times \hat{n}_F \|$ where $\hat{n}_F$ is the face normal. It would be possible to compute the tangent by only knowing the normal to the curve in the face plane (i.e. the dim-dimensional normal). However, when these quadratures are used, the weak form typically involves the so-called conormal, which can not be computed without knowing the surface normal in $\mathbb{R}^{\text{spacedim}}$. The conormal is the unit vector parallel to the projection of the face normal into the surface plane. This is the same as the normalized boundary form.

    Definition at line 106 of file immersed_surface_quadrature.h.
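The last sum is simple enough to spell out. An illustrative sketch (not the library's implementation) of how the stored points, weights, and normals enter it, assuming for simplicity a Jacobian $J$ that is constant over the cell (e.g. an affine mapping):

#include <deal.II/base/point.h>
#include <deal.II/base/tensor.h>

#include <functional>
#include <vector>

using namespace dealii;

template <int dim>
double surface_integral(const std::vector<Point<dim>>     &points,  // x_q
                        const std::vector<double>         &weights, // w_q
                        const std::vector<Tensor<1, dim>> &normals, // n_q
                        const std::function<double(const Point<dim> &)>
                          &f_at_mapped_point, // evaluates f(F_K(x_q))
                        const Tensor<2, dim> &J)
{
  const double         det_J   = determinant(J);
  const Tensor<2, dim> J_inv_T = transpose(invert(J));

  // sum_q f(F_K(x_q)) det(J) |J^{-T} n_q| w_q
  double value = 0;
  for (unsigned int q = 0; q < points.size(); ++q)
    value += f_at_mapped_point(points[q]) * det_J *
             (J_inv_T * normals[q]).norm() * weights[q];
  return value;
}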

    Constructor & Destructor Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html 2024-11-15 06:44:19.347581526 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1QuadratureGenerator.html 2024-11-15 06:44:19.347581526 +0000 @@ -147,7 +147,7 @@

    Detailed Description

    template<int dim>
class NonMatching::QuadratureGenerator< dim >

This class creates immersed quadrature rules over a BoundingBox, $B \subset \mathbb{R}^{dim}$, when the domain is described by a level set function, $\psi$.

    This class creates quadrature rules for the intersections between the box and the three different regions defined by the level set function. That is, it creates quadrature rules to integrate over the following regions

\[
 N = \{x \in B : \psi(x) < 0 \}, \\
 P = \{x \in B : \psi(x) > 0 \}, \\
 S = \{x \in B : \psi(x) = 0 \},
\]

When working with level set functions, the most common approach is to describe a domain, $\Omega$, as

    \[
 \Omega = \{ x \in \mathbb{R}^{dim} : \psi(x) < 0 \}.
 \]

Given this, we shall use the name convention that $N$ is the "inside" region (i.e. inside $\Omega$), $P$ is the "outside" region and $S$ is the "surface" region. The "inside" and "outside" quadratures will also be referred to as the "bulk"-quadratures.

The underlying algorithm uses a 1-dimensional quadrature rule as a base for creating the immersed quadrature rules. Gauss-Legendre quadrature (QGauss) is recommended. The constructor takes an hp::QCollection<1>. One can select which 1d-quadrature in the collection should be used through the set_1d_quadrature() function. The number of quadrature points in the constructed quadratures will vary depending on the level set function. More quadrature points will be created if the intersection is "bad", for example, if the zero-contour has a high curvature compared to the size of the box. However, if the number of points in the 1d quadrature is $n$, the number of points will be proportional to $n^{dim}$ in the bulk quadratures and to $n^{dim-1}$ in the surface quadrature. For example, in the 2d-example in the above figure, there are 2 points in the 1d-quadrature. If the 1d-quadrature is a Gauss-Legendre quadrature and the grid has size $h$, the immersed quadratures typically give global errors proportional to $h^{2n}$, both for the bulk and surface integrals. If the 1d-quadrature has positive weights, the weights of the immersed quadratures will also be positive.

A detailed description of the underlying algorithm can be found in "High-Order Quadrature Methods for Implicitly Defined Surfaces and Volumes in Hyperrectangles", R. I. Saye, SIAM J. Sci. Comput., 37(2), http://www.dx.doi.org/10.1137/140966290
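For orientation, a minimal sketch of the typical call sequence (the circle level set and box are illustrative; names follow deal.II's NonMatching interface):

#include <deal.II/base/bounding_box.h>
#include <deal.II/base/function_signed_distance.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/hp/q_collection.h>
#include <deal.II/non_matching/quadrature_generator.h>

using namespace dealii;

void bulk_quadrature_sketch()
{
  constexpr int dim = 2;

  // n = 2 points in 1d: roughly n^dim bulk points, n^{dim-1} surface points.
  NonMatching::QuadratureGenerator<dim> generator(
    hp::QCollection<1>(QGauss<1>(2)));

  const Functions::SignedDistance::Sphere<dim> level_set(Point<dim>(), 0.4);
  const BoundingBox<dim> box({Point<dim>(-0.5, -0.5), Point<dim>(0.5, 0.5)});

  generator.generate(level_set, box);

  const Quadrature<dim> &inside  = generator.get_inside_quadrature();  // over N
  const Quadrature<dim> &outside = generator.get_outside_quadrature(); // over P
  const NonMatching::ImmersedSurfaceQuadrature<dim> &surface =
    generator.get_surface_quadrature();                                // over S
  (void)inside; (void)outside; (void)surface;
}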

Return the quadrature rule for the region $\{x \in B : \psi(x) < 0 \}$ created in the previous call to generate(). Here, $B$ is the BoundingBox passed to generate().

    Definition at line 1871 of file quadrature_generator.cc.

    @@ -327,7 +327,7 @@
Return the quadrature rule for the region $\{x \in B : \psi(x) > 0 \}$ created in the previous call to generate(). Here, $B$ is the BoundingBox passed to generate().

    Definition at line 1880 of file quadrature_generator.cc.

    @@ -349,8 +349,8 @@
Return the quadrature rule for the region $\{x \in B : \psi(x) = 0 \}$ created in the previous call to generate(). Here, $B$ is the BoundingBox passed to generate().

Note
The normal at the quadrature points will be parallel to $\nabla \psi$.

    Definition at line 1889 of file quadrature_generator.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html 2024-11-15 06:44:19.391581919 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1DiscreteQuadratureGeneratorImplementation_1_1RefSpaceFEFieldFunction.html 2024-11-15 06:44:19.391581919 +0000 @@ -255,10 +255,10 @@
    template<int dim, typename VectorType = Vector<double>>
    class NonMatching::internal::DiscreteQuadratureGeneratorImplementation::RefSpaceFEFieldFunction< dim, VectorType >

    This class evaluates a function defined by a solution vector and a DoFHandler transformed to reference space. To be precise, if we let $\hat{x}$ be a point on the reference cell, this class implements the function

    $\hat{f}(\hat{x}) = \sum_{j=0}^{n-1} f_j \hat{\phi}_j(\hat{x})$,

where $f_j$ are the local solution values and $\hat{\phi}_j(\hat{x})$ are the local reference space shape functions. The gradient and Hessian of this function are thus derivatives with respect to the reference space coordinates, $\hat{x}_0, \hat{x}_1, \ldots$.

    Note that this class is similar to FEFieldFunction, but that FEFieldFunction implements the following function on a given cell, $K$,

    $f(x) = \sum_{j=0}^{n-1} f_j \hat{\phi}_j(F_K^{-1}(x))$,

which has the same coefficients but uses real space basis functions. Here, $F_K$ is the mapping from the reference cell to the real cell.

    Before calling the value/gradient/hessian function, the set_active_cell function must be called to specify which cell the function should be evaluated on.

    Definition at line 1335 of file quadrature_generator.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 2024-11-15 06:44:19.423582205 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator.html 2024-11-15 06:44:19.423582205 +0000 @@ -163,20 +163,20 @@

    Detailed Description

    template<int dim, int spacedim>
    class NonMatching::internal::QuadratureGeneratorImplementation::QGenerator< dim, spacedim >

    This class implements the Saye-algorithm cited in the documentation of the QuadratureGenerator class.

The generate function takes a number of $dim$-dimensional level set functions, $\psi_i$, and a BoundingBox<dim>, and builds a partitioning of quadratures, as defined in documentation of the QPartitioning class. That is, this class builds an object of type QPartitioning<dim>.

If all $\psi_i$ passed to generate can be determined to be positive or negative definite, the QPartitioning will consist of a single quadrature forming a tensor product.

If this is not the case, the algorithm uses recursion over the spatial dimension. The spacedim template parameter denotes the dimension we started with and dim denotes on what level we are in the recursion. That is, we first construct a QPartitioning<dim - 1> and then build the higher dimensional quadratures from these. What we in the end actually want is a spacedim-dimensional partitioning of quadratures, for a single level set function, $\psi$.

The algorithm is based on the implicit function theorem. Starting with a single level set function, $\psi$, we try to find a direction $i$, such that

    $|\frac{\partial \psi}{\partial x_i}| > 0$.

    throughout the whole box. This means that the zero-contour of the level set function can be parameterized by an implicit function

$H = H(x_0, ..., x_{i-1}, x_{i+1}, ..., x_{dim-1})$,

    so that

$\psi(..., x_{i-1}, H(..., x_{i-1}, x_{i+1}, ...), x_{i+1}, ...) = 0$,

over a subset, $I \subset C \subset \mathbb{R}^{dim-1}$, of the cross section, $C$, of the box (see BoundingBox::cross_section). Here, $I$ is the "indefinite"-region defined in the QPartitioning class. To follow convention in the original paper, we will refer to $H$ as the "height-function" and to $i$ as the "height-function direction".

If a height function direction can be found, we go down in dimension by creating two new level set functions, $\{\psi_0, \psi_1\}$, which are the restriction of $\psi$ to the top and bottom faces of the box (in the height function direction). We then delegate to QGenerator<dim-1, spacedim> to create a QPartitioning<dim-1> over the cross section.

    When we reach the base case, $dim = 1$, the creation of QPartitioning<1> is simple. See the documentation in specialized class: QGenerator<1, spacedim>.

    As we go up through the dimensions and create the higher dimensional quadratures, we need to know the function value of the height functions at the lower dimensional quadrature points. Since the functions are implicit, we need to do root-finding on the level set functions to find the function values. For this we use the class UpThroughDimensionCreator, see documentation there.

When we have $n$ level set functions (i.e. after having gone down in dimension), we try to find a height function direction which works for all those $\psi_i$ that are intersected by the zero contour (i.e. those not positive or negative definite). If such a direction exists, we will have a maximum of $n$ associated implicit height functions, $H_j$. Each $H_j$ parametrizes the $x_i$-coordinate of the zero-contour over a region, $I_j$. The indefinite region in the lower dimensional partitioning is the union of these: $I = \cup_j I_j$.

As we try to find a height function direction, we estimate bounds on the gradient components by approximating each component as a 1st-order Taylor-polynomial. If a direction cannot be found, the box is split and we recurse on each smaller box. This makes an implicit function more likely to exist since we seek it over a smaller portion of the zero contour. It also makes the estimated bounds tighter since we extrapolate the Taylor-polynomial a shorter distance.

Since we cannot split a box forever, there is a maximum number of allowed splits, specified in the additional data struct passed to the constructor. If this is reached, the algorithm uses the midpoint method as a last resort.
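An illustrative sketch of the height-function direction test (this is not the library's internal code; the user-supplied bound on the second derivatives is a hypothetical stand-in for the Taylor-based estimate):

#include <deal.II/base/bounding_box.h>
#include <deal.II/base/function.h>

#include <cmath>

using namespace dealii;

template <int dim>
bool is_height_function_direction(const Function<dim>    &psi,
                                  const BoundingBox<dim> &box,
                                  const unsigned int      i,
                                  const double            hessian_bound)
{
  // 1st-order Taylor estimate: the i-th gradient component at the center...
  const Tensor<1, dim> gradient = psi.gradient(box.center());

  // ...can change by at most hessian_bound * (distance to a corner).
  const auto   corners   = box.get_boundary_points();
  const double half_diag = 0.5 * corners.first.distance(corners.second);

  // If that variation cannot flip the sign, |d psi / d x_i| > 0 holds
  // throughout the box and i is a height-function direction.
  return std::abs(gradient[i]) > hessian_bound * half_diag;
}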

    @@ -326,7 +326,7 @@
Gets the $(dim - 1)$-dimensional quadratures from the lower dimensional algorithm and creates the $dim$-dimensional quadrature rules over the box from the lower dimensional ones.

    Definition at line 1141 of file quadrature_generator.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html 2024-11-15 06:44:19.443582383 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QGenerator_3_011_00_01spacedim_01_4.html 2024-11-15 06:44:19.443582383 +0000 @@ -164,8 +164,8 @@

    Detailed Description

    template<int spacedim>
    class NonMatching::internal::QuadratureGeneratorImplementation::QGenerator< 1, spacedim >

    The 1d-base case of the recursive algorithm QGenerator<dim, spacedim>.

Let $L$ and $R$ be the left and right bounds of the one-dimensional BoundingBox. This interval is partitioned into $[x_0, x_1, ..., x_n]$ where $x_0 = L$, $x_n = R$, and the remaining $x_i$ are the roots of the level set functions in the interval $[L, R]$. In each interval, $[x_i, x_{i+1}]$, quadrature points are distributed according to a 1d-quadrature rule. These points are added to one of the regions of QPartitioning determined from the signs of the level set functions on the interval (see documentation of QPartitioning).

If spacedim = 1 the points $[x_1, x_{n-1}]$ are also added as surface quadrature points to QPartitioning::surface.

    Definition at line 1276 of file quadrature_generator.h.
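An illustrative sketch (not the library's code) of this base case: scale the 1d base rule into each subinterval $[x_i, x_{i+1}]$ of the partition given by the roots.

#include <deal.II/base/quadrature.h>

#include <utility>
#include <vector>

using namespace dealii;

std::vector<std::pair<double, double>>
distribute_in_partition(const std::vector<double> &partition, // x_0 = L ... x_n = R
                        const Quadrature<1>       &base)      // e.g. QGauss<1>(2)
{
  std::vector<std::pair<double, double>> points_and_weights;
  for (unsigned int i = 0; i + 1 < partition.size(); ++i)
    {
      const double left   = partition[i];
      const double length = partition[i + 1] - partition[i];
      // Affine map of each base point/weight onto the subinterval; which
      // region of QPartitioning receives them is decided by the signs of
      // the level set functions on this subinterval.
      for (unsigned int q = 0; q < base.size(); ++q)
        points_and_weights.emplace_back(left + length * base.point(q)[0],
                                        length * base.weight(q));
    }
  return points_and_weights;
}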

    Constructor & Destructor Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html 2024-11-15 06:44:19.459582526 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1QPartitioning.html 2024-11-15 06:44:19.459582526 +0000 @@ -134,19 +134,19 @@

    Detailed Description

    template<int dim>
class NonMatching::internal::QuadratureGeneratorImplementation::QPartitioning< dim >

Class that stores quadrature rules to integrate over 4 different regions of a single BoundingBox, $B$. Given multiple level set functions,

$\psi_i : \mathbb{R}^{dim} \rightarrow \mathbb{R}$, $i = 0, 1, ...$,

the box, $B \subset \mathbb{R}^{dim}$, is partitioned into a "negative", "positive", and "indefinite" region, $B = N \cup P \cup I$, according to the signs of $\psi_i$ over each region:

\[
 N = \{x \in B : \psi_i(x) < 0, \forall i \}, \\
 P = \{x \in B : \psi_i(x) > 0, \forall i \}, \\
 I = B \setminus (\overline{N} \cup \overline{P}).
\]

Thus, all $\psi_i$ are positive over $P$ and negative over $N$. Over $I$ the level set functions differ in sign. This class holds quadrature rules for each of these regions. In addition, when there is a single level set function, $\psi$, it holds a surface quadrature for the zero contour of $\psi$:

$S = \{x \in B : \psi(x) = 0 \}$.

Note that when there is a single level set function, $I$ is empty and $N$ and $P$ are the regions that one typically integrates over in an immersed finite element method.

    Definition at line 815 of file quadrature_generator.h.

    Member Function Documentation

    @@ -208,7 +208,7 @@
Quadrature for the region $\{x \in B : \psi_i(x) < 0 \forall i \}$ of the box, $B$.

    Definition at line 835 of file quadrature_generator.h.

    @@ -227,7 +227,7 @@
Quadrature for the region $\{x \in B : \psi_i(x) > 0 \forall i \}$ of the box, $B$.

    Definition at line 841 of file quadrature_generator.h.

    @@ -265,7 +265,7 @@
Quadrature for the region $\{x \in B : \psi(x) = 0 \}$ of the box, $B$.

    Definition at line 853 of file quadrature_generator.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html 2024-11-15 06:44:19.479582704 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1RootFinder.html 2024-11-15 06:44:19.479582704 +0000 @@ -135,7 +135,7 @@  

    Detailed Description

A class that attempts to find multiple distinct roots of a function, $f(x)$, over an interval, $[l, r]$. This is done as follows. If there is a sign change in function value between the interval end points, we solve for the root. If there is no sign change, we attempt to bound the function value away from zero on $[l, r]$, to conclude that no roots exist. If we can't exclude that there are roots, we split the interval in two: $[l, (r + l) / 2]$, $[(r + l) / 2, r]$, and use the same algorithm recursively on each interval. This means that we can typically find 2 distinct roots, but not 3.

The bounds on the function values are estimated using the function taylor_estimate_function_bounds, which approximates the function as a second order Taylor-polynomial around the interval midpoint. When we have a sign change on an interval, this class uses boost::math::tools::toms748_solve for finding roots.

    Definition at line 664 of file quadrature_generator.h.
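An illustrative sketch of the recursive strategy (simplified: the library bounds the function via the Taylor estimate before splitting and solves bracketed roots with boost::math::tools::toms748_solve; here we simply recurse to a fixed depth and bisect):

#include <cmath>
#include <functional>
#include <vector>

void find_roots(const std::function<double(double)> &f,
                const double                         l,
                const double                         r,
                const int                            max_splits,
                std::vector<double>                 &roots)
{
  if (f(l) * f(r) < 0) // sign change: a root is bracketed, bisect
    {
      double a = l, b = r;
      while (b - a > 1e-12 * (r - l))
        {
          const double m = 0.5 * (a + b);
          if (f(a) * f(m) <= 0)
            b = m;
          else
            a = m;
        }
      roots.push_back(0.5 * (a + b));
      return;
    }

  // No sign change: if we cannot rule out roots, split [l, r] in two and
  // recurse; two distinct roots can then be found, but typically not three.
  if (max_splits > 0)
    {
      const double m = 0.5 * (l + r);
      find_roots(f, l, m, max_splits - 1, roots);
      find_roots(f, m, r, max_splits - 1, roots);
    }
}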

@@ -181,7 +181,7 @@

For each of the incoming functions, attempt to find the roots over the interval defined by interval and add these to roots. The returned roots will be sorted in ascending order: $x_0 < x_1 < ...$ and duplicate roots (with respect to the tolerance in AdditionalData) will be removed.

    Definition at line 533 of file quadrature_generator.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html 2024-11-15 06:44:19.499582883 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1UpThroughDimensionCreator.html 2024-11-15 06:44:19.499582883 +0000 @@ -144,13 +144,13 @@

    Detailed Description

    template<int dim, int spacedim>
class NonMatching::internal::QuadratureGeneratorImplementation::UpThroughDimensionCreator< dim, spacedim >

This class is responsible for creating quadrature points for the $dim$-dimensional quadrature partitioning from a $(dim - 1)$-dimensional "indefinite" quadrature (see the QPartitioning documentation).

To be precise, let $[L, R]$ be the extents of the box in the height function direction and let $I \subset \mathbb{R}^{dim-1}$ be the lower dimensional indefinite region. This class will create quadrature points over $I \times [L, R] \subset \mathbb{R}^{dim}$ and, in the case $dim=spacedim$, points for the surface quadrature.

For each lower dimensional quadrature point, $(x_I, w_I)$ in the indefinite quadrature, we create several 1d-level set functions by restricting $\psi_j$ to $x_I$. We then partition the interval $[L, R]$ into $[y_0, y_1, ..., y_n]$, where $y_0 = L$, $y_n = R$, and the remaining $y_i$ are the roots of the 1d-level set functions in $[L, R]$. Since the level set functions change sign between the roots, each interval belongs to a different region in the quadrature partitioning.

In each interval, $[y_i, y_{i+1}]$, we distribute points according to the 1d-base quadrature, $(x_q, w_q)$, and take the Cartesian product with $(x_I, w_I)$ to create the $dim$-dimensional quadrature points, $(X_q, W_q)$: $X_q = x_I \times (y_i + (y_{i+1} - y_i) x_q)$, $W_q = w_I (y_{i+1} - y_i) w_q$.

When $dim=spacedim$, we have a single level set function, $\psi$. Since we have fulfilled the implicit function theorem, there is a single root $y_1 \in [L, R]$. The point, $x_s = x_I \times y_1$, will be added as a point in the surface quadrature. One can show that the correct weight of this point is

$w_s = \frac{\|\nabla \psi(x_s)\|}{|\partial_i \psi(x_s)|} w_I$,

where $i$ is the height function direction.

    Definition at line 896 of file quadrature_generator.h.
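The two formulas above as plain arithmetic (illustrative only):

#include <cmath>

// Map a 1d base point (x_q, w_q) into [y_i, y_{i+1}]: this is the height
// coordinate of X_q.
double lifted_coordinate(const double x_q, const double y_i, const double y_ip1)
{
  return y_i + (y_ip1 - y_i) * x_q;
}

// Combine with the lower dimensional weight: W_q = w_I (y_{i+1} - y_i) w_q.
double lifted_weight(const double w_q, const double w_I,
                     const double y_i, const double y_ip1)
{
  return w_I * (y_ip1 - y_i) * w_q;
}

// Surface-point weight for dim == spacedim, with i the height direction:
// w_s = ||grad psi(x_s)|| / |d_i psi(x_s)| * w_I.
double surface_weight(const double grad_norm,
                      const double dpsi_di,
                      const double w_I)
{
  return grad_norm / std::abs(dpsi_di) * w_I;
}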

    Constructor & Destructor Documentation

@@ -210,7 +210,7 @@

Create $dim$-dimensional immersed quadratures from the incoming $(dim-1)$-dimensional quadratures and add these to q_partitioning.

    Definition at line 748 of file quadrature_generator.cc.

    @@ -283,7 +283,7 @@

    Create a surface quadrature point from the lower-dimensional point and add it to surface_quadrature.

This function is only called when $dim=spacedim$ and there is a single level set function. At this point there should only be a single root in the interval $[L, R]$.

    Definition at line 804 of file quadrature_generator.cc.

    @@ -392,7 +392,7 @@
1d-functions that are restrictions of each dim-dimensional level set function passed to generate() to some $(dim-1)$-dimensional point.

    Definition at line 966 of file quadrature_generator.h.

    @@ -446,7 +446,7 @@
The roots of the functions in point_restrictions. These will be the values of the height functions, $\{H_i(x_I)\}$, at some lower dimensional quadrature point, $x_I \in \mathbb{R}^{dim-1}$.

    Definition at line 979 of file quadrature_generator.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html 2024-11-15 06:44:19.523583098 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector.html 2024-11-15 06:44:19.523583098 +0000 @@ -530,8 +530,8 @@

A function object that users may supply and that is intended to prepare the linear solver for subsequent calls to solve_with_jacobian().

    The job of setup_jacobian() is to prepare the linear solver for subsequent calls to solve_with_jacobian(), in the solution of linear systems $Ax = b$. The exact nature of this system depends on the SolutionStrategy that has been selected.

In the cases strategy = SolutionStrategy::newton or SolutionStrategy::linesearch, $A$ is the Jacobian $J = \partial F/\partial u$. If strategy = SolutionStrategy::picard, $A$ is the approximate Jacobian matrix $L$.

    Parameters
    @@ -562,7 +562,7 @@
    Parameters
  current_u        Current value of $u$.
  [in]  rhs        The system right hand side to solve for.
  [out] dst        The solution of $J^{-1} * \texttt{src}$.
  [in]  tolerance  The tolerance with which to solve the linear system of equations.
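A sketch of wiring up these two callbacks (hedged: the member and parameter names follow the tables above; the assembly and solve bodies are user code):

#include <deal.II/lac/vector.h>
#include <deal.II/numerics/nonlinear.h>

using namespace dealii;

void attach_callbacks(NonlinearSolverSelector<Vector<double>> &solver)
{
  // Prepare the linear solver for subsequent solve_with_jacobian() calls,
  // e.g. assemble and factorize A (the Jacobian J, or L for Picard).
  solver.setup_jacobian = [](const Vector<double> &current_u) {
    // ... user assembly at current_u ...
  };

  // Solve A * dst = rhs to the given tolerance.
  solver.solve_with_jacobian = [](const Vector<double> &rhs,
                                  Vector<double>       &dst,
                                  const double          tolerance) {
    // ... user linear solve ...
  };
}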
/usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html 2024-11-15 06:44:19.539583240 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classNonlinearSolverSelector_1_1AdditionalData.html 2024-11-15 06:44:19.539583240 +0000 @@ -260,7 +260,7 @@

  solver_type                    Nonlinear solver type.
  strategy                       Method of solving the nonlinear problem.
  maximum_non_linear_iterations  Maximum number of nonlinear iterations.
  function_tolerance             Absolute stopping tolerance for the norm of the residual $F(u)$.
  relative_tolerance             Relative stopping tolerance.
  step_tolerance                 Tolerance for minimum scaled step length.
  anderson_subspace_size         Size of the Anderson acceleration subspace; use 0 to disable.

@@ -343,7 +343,7 @@
A scalar used as a stopping tolerance on the scaled maximum norm of the system function $F(u)$ or $G(u)$.

    If set to zero, default values will be used.

    Definition at line 177 of file nonlinear.h.

    @@ -363,7 +363,7 @@
Relative $l_2$ tolerance of the residual to be reached.

    Note
    Solver terminates successfully if either the function tolerance or the relative tolerance has been reached.

    Definition at line 185 of file nonlinear.h.
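A configuration sketch (hedged: the member names come from the parameter list above; consult nonlinear.h for the exact constructor signature):

#include <deal.II/lac/vector.h>
#include <deal.II/numerics/nonlinear.h>

using namespace dealii;

void configure_selector()
{
  NonlinearSolverSelector<Vector<double>>::AdditionalData data;
  data.function_tolerance = 1e-10; // absolute tolerance on ||F(u)||
  data.relative_tolerance = 1e-6;  // relative l2 tolerance of the residual

  NonlinearSolverSelector<Vector<double>> solver(data);
}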

    /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 2024-11-15 06:44:19.575583562 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1ArclengthProjectionLineManifold.html 2024-11-15 06:44:19.575583562 +0000 @@ -573,7 +573,7 @@
Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -603,24 +603,24 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
  \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
 \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                            -F^{-1}(\mathbf x_1)\right]
\end{align*}

    In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
  \mathbf s(t) &= F(\zeta(t))
 \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
 \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                    -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
  \mathbf s'(0) &=
    \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                       + t\left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right]\right)\right|_{t=0}
\\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                   \left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right].
\end{align*}

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.
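A sketch of this chain rule for a ChartManifold-like interface (illustrative; the periodicity handling mentioned above is omitted):

#include <deal.II/base/derivative_form.h>
#include <deal.II/base/point.h>
#include <deal.II/grid/manifold.h>

using namespace dealii;

template <int dim, int spacedim, int chartdim>
Tensor<1, spacedim>
tangent_via_chart(const ChartManifold<dim, spacedim, chartdim> &manifold,
                  const Point<spacedim>                        &x1,
                  const Point<spacedim>                        &x2)
{
  const Point<chartdim> xi1 = manifold.pull_back(x1); // F^{-1}(x1)
  const Point<chartdim> xi2 = manifold.pull_back(x2); // F^{-1}(x2)

  // nabla_xi F(xi_1): a spacedim x chartdim matrix.
  const DerivativeForm<1, chartdim, spacedim> grad =
    manifold.push_forward_gradient(xi1);

  // s'(0) = grad * (xi_2 - xi_1), a spacedim-dimensional vector.
  Tensor<1, spacedim> tangent;
  for (unsigned int i = 0; i < spacedim; ++i)
    for (unsigned int j = 0; j < chartdim; ++j)
      tangent[i] += grad[i][j] * (xi2[j] - xi1[j]);
  return tangent;
}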

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html 2024-11-15 06:44:19.607583848 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1DirectionalProjectionManifold.html 2024-11-15 06:44:19.607583848 +0000 @@ -487,7 +487,7 @@
  x1   The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

    Note
This class can be used as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold, by overloading the project_to_manifold() function.
    Parameters
    @@ -496,7 +496,7 @@
Returns
A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

    Reimplemented from Manifold< dim, spacedim >.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html 2024-11-15 06:44:19.643584170 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NURBSPatchManifold.html 2024-11-15 06:44:19.643584170 +0000 @@ -448,7 +448,7 @@
Given a point in the spacedim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the uv coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function.

    Refer to the general documentation of this class for more information.

    @@ -637,7 +637,7 @@
Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -667,24 +667,24 @@
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
  \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
 \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                            -F^{-1}(\mathbf x_1)\right]
\end{align*}

    In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
  \mathbf s(t) &= F(\zeta(t))
 \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
 \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                    -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
  \mathbf s'(0) &=
    \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                       + t\left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right]\right)\right|_{t=0}
\\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                   \left[F^{-1}(\mathbf x_2)
                                -F^{-1}(\mathbf x_1)\right].
\end{align*}

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

    -

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
-x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    +

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf
+x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.
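
    In code, this composition might look as follows. This is a sketch under the assumption that the chart's pull_back() and push_forward_gradient() are implemented; the real get_tangent_vector() additionally accounts for periodicity, which is ignored here:

    #include <deal.II/base/derivative_form.h>
    #include <deal.II/grid/manifold.h>

    using namespace dealii;

    // Sketch: s'(0) = \nabla_\xi F(\xi_1) [\xi_2 - \xi_1].
    template <int dim, int spacedim, int chartdim>
    Tensor<1, spacedim>
    tangent_sketch(const ChartManifold<dim, spacedim, chartdim> &chart,
                   const Point<spacedim> &x1,
                   const Point<spacedim> &x2)
    {
      const Point<chartdim> xi1 = chart.pull_back(x1); // F^{-1}(x1)
      const Point<chartdim> xi2 = chart.pull_back(x2); // F^{-1}(x2)
      const Tensor<1, chartdim> delta = xi2 - xi1;     // chart-space direction
      // Multiply the spacedim x chartdim gradient by the chartdim vector:
      return apply_transformation(chart.push_forward_gradient(xi1), delta);
    }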

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html 2024-11-15 06:44:19.679584491 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalProjectionManifold.html 2024-11-15 06:44:19.679584491 +0000 @@ -481,7 +481,7 @@
    x1: The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

    Note
    The same convention applies if you use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold by overloading the project_to_manifold() function.
    Parameters
    @@ -490,7 +490,7 @@
    Returns
    A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

    Reimplemented from Manifold< dim, spacedim >.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 2024-11-15 06:44:19.711584777 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classOpenCASCADE_1_1NormalToMeshProjectionManifold.html 2024-11-15 06:44:19.711584777 +0000 @@ -481,7 +481,7 @@
    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. For the current class, we assume that the manifold is flat, so the geodesic is the straight line between the two points, and we return $\mathbf x_2-\mathbf x_1$. The normalization of the vector is chosen so that it fits the convention described in Manifold::get_tangent_vector().

    Note
    The same convention applies if you use this class as a stepping stone to build a manifold that only "slightly" deviates from a flat manifold by overloading the project_to_manifold() function.
    Parameters
    @@ -490,7 +490,7 @@
    Returns
    A "direction" vector tangential to the geodesic. Here, this is $\mathbf x_2-\mathbf x_1$, possibly modified by the periodicity of the domain as set in the constructor, to use the "shortest" connection between the points through the periodic boundary as necessary.

    Reimplemented from Manifold< dim, spacedim >.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html 2024-11-15 06:44:19.751585134 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPArpackSolver.html 2024-11-15 06:44:19.751585134 +0000 @@ -286,7 +286,7 @@

    Detailed Description

    template<typename VectorType>
    class PArpackSolver< VectorType >

    Interface for using PARPACK. PARPACK is a collection of Fortran77 subroutines designed to solve large scale eigenvalue problems. Here we interface to the routines pdneupd, pdseupd, pdnaupd, pdsaupd of PARPACK. The package is designed to compute a few eigenvalues and corresponding eigenvectors of a general n by n matrix A. It is most appropriate for large sparse matrices A.

    In this class we make use of the method applied to the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively.

    The ArpackSolver can be used in application codes in the following way:

    const unsigned int num_arnoldi_vectors = 2*size_of_spectrum + 2;
    @@ -311,7 +311,7 @@
    const AdditionalData additional_data
    for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable size_of_spectrum tells PARPACK the number of eigenvector/eigenvalue pairs to solve for. Here, lambda is a vector that will contain the eigenvalues computed, x a vector of objects of type V that will contain the eigenvectors computed.

    Currently, only three modes of (P)Arpack are implemented. In mode 3 (default), OP is an inverse operation for the matrix A - sigma * B, where sigma is a shift value, set to zero by default. In mode 2, OP is an inverse of the mass matrix B. Finally, mode 1 corresponds to the standard eigenvalue problem without spectral transformation $Ax=\lambda x$. The mode can be specified via the AdditionalData object. Note that for shift-and-invert (mode=3), the sought eigenpairs are those after the spectral transformation is applied.

    The OP can be specified by using a LinearOperator:

    const double shift = 5.0;
    const auto op_A = linear_operator<vector_t>(A);
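    A sketch of how this operator might be completed and handed to the solver. Here op_B, solver, preconditioner, and mpi_communicator are assumed to exist, and lambda, x, additional_data, and size_of_spectrum are the variables from the snippet above; the exact argument lists should be checked against parpack_solver.h:

    const auto op_B = linear_operator<vector_t>(B);

    // OP = (A - shift * B)^{-1}, realized through an iterative solver:
    const auto op_shift_invert =
      inverse_operator(op_A - shift * op_B, solver, preconditioner);

    PArpackSolver<vector_t> eigensolver(solver_control, mpi_communicator,
                                        additional_data);
    eigensolver.solve(A, B, op_shift_invert, lambda, x, size_of_spectrum);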
    @@ -645,7 +645,7 @@ const unsigned int n_eigenvalues

    Solve the generalized eigenspectrum problem $A x=\lambda B x$ by calling the pd(n/s)eupd and pd(n/s)aupd functions of PARPACK.

    In mode=3, inverse should correspond to $[A-\sigma B]^{-1}$, whereas in mode=2 it should represent $B^{-1}$. For mode=1 both B and inverse are ignored.

    Definition at line 769 of file parpack_solver.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html 2024-11-15 06:44:19.775585348 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1CommunicationPattern.html 2024-11-15 06:44:19.775585348 +0000 @@ -304,7 +304,7 @@ const MPI_Comm communicator

    Reinitialization that takes the number of locally-owned degrees of freedom local_size and an index set for the required ghost indices ghost_indices.

    The local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively.

    The export_to_ghost_array will populate an array containing values from locally-owned AND ghost indices, as for the relevant set of dofs of a usual FEM simulation.

    Definition at line 49 of file petsc_communication_pattern.cc.
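
    A minimal sketch of this reinitialization; the sizes and the name n_global_dofs are made up for illustration:

    // Each process owns 100 indices; all processes additionally need the
    // ghost indices 0..4. n_global_dofs is the assumed global vector size.
    IndexSet ghost_indices(n_global_dofs);
    ghost_indices.add_range(0, 5);

    PETScWrappers::CommunicationPattern pattern;
    pattern.reinit(/*local_size=*/100, ghost_indices, MPI_COMM_WORLD);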

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html 2024-11-15 06:44:19.823585777 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1FullMatrix.html 2024-11-15 06:44:19.823585777 +0000 @@ -1518,8 +1518,8 @@
    Return the $l_1$-norm of the matrix, that is $\|M\|_1=\max_{j}\sum_{i} |M_{ij}|$ (the maximum over all columns $j$ of the sums of absolute values over all rows $i$). This is the natural matrix norm that is compatible with the $l_1$-norm for vectors, i.e. $\|Mv\|_1\leq \|M\|_1 \|v\|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 418 of file petsc_matrix_base.cc.

    @@ -1547,8 +1547,8 @@
    Return the $l_\infty$-norm of the matrix, that is $\|M\|_\infty=\max_{i}\sum_{j} |M_{ij}|$ (the maximum over all rows $i$ of the sums of absolute values over all columns $j$). This is the natural matrix norm that is compatible with the $l_\infty$-norm of vectors, i.e. $\|Mv\|_\infty \leq \|M\|_\infty \|v\|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 431 of file petsc_matrix_base.cc.

    @@ -1604,7 +1604,7 @@
    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be square for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

    @@ -1635,7 +1635,7 @@
    Compute the matrix scalar product $\left(u,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors have to be distributed vectors as well. Conversely, if the matrix is not distributed, then neither of the vectors may be.

    @@ -2351,7 +2351,7 @@

    Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    Definition at line 644 of file petsc_matrix_base.cc.

    @@ -2385,8 +2385,8 @@
    Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    Definition at line 652 of file petsc_matrix_base.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html 2024-11-15 06:44:19.887586349 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockSparseMatrix.html 2024-11-15 06:44:19.891586384 +0000 @@ -888,7 +888,7 @@
    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Definition at line 408 of file petsc_block_sparse_matrix.h.
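
    For instance, a sketch in which the vectors are assumed to have been reinit()ed compatibly with the matrix elsewhere:

    #include <deal.II/lac/petsc_block_sparse_matrix.h>
    #include <deal.II/lac/petsc_block_vector.h>

    using namespace dealii;

    // dst = M * src and dst_T = M^T * dst, as described above.
    void apply_block_matrix(const PETScWrappers::MPI::BlockSparseMatrix &M,
                            PETScWrappers::MPI::BlockVector       &dst,
                            const PETScWrappers::MPI::BlockVector &src,
                            PETScWrappers::MPI::BlockVector       &dst_T)
    {
      M.vmult(dst, src);
      M.Tvmult(dst_T, dst);
    }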

    @@ -1000,7 +1000,7 @@
    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 439 of file petsc_block_sparse_matrix.h.

    @@ -2050,7 +2050,7 @@
    Adding matrix-vector multiplication: add $M*src$ to $dst$, with $M$ being this matrix.

    @@ -2155,7 +2155,7 @@
    Compute the matrix scalar product $\left(u,Mv\right)$.

    @@ -2624,7 +2624,7 @@
    Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    @@ -2732,7 +2732,7 @@
    Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html 2024-11-15 06:44:19.951586920 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1BlockVector.html 2024-11-15 06:44:19.951586920 +0000 @@ -1982,7 +1982,7 @@
    Return the square of the $l_2$-norm.

    @@ -2034,7 +2034,7 @@
    Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

    @@ -2060,7 +2060,7 @@
    Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

    @@ -2086,7 +2086,7 @@
    Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.
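
    In code, the four norms just listed might be queried as follows; v is an already initialized PETScWrappers::MPI::BlockVector (a sketch):

    const double n_sq  = v.norm_sqr();     // square of the l2-norm
    const double n_1   = v.l1_norm();      // sum of absolute values
    const double n_2   = v.l2_norm();      // sqrt of n_sq
    const double n_inf = v.linfty_norm();  // maximum absolute value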

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html 2024-11-15 06:44:20.015587492 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1SparseMatrix.html 2024-11-15 06:44:20.015587492 +0000 @@ -827,7 +827,7 @@
    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v^\ast,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be square for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    @@ -850,7 +850,7 @@ const Vector & v

    Compute the matrix scalar product $\left(u^\ast,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Definition at line 814 of file petsc_parallel_sparse_matrix.cc.

    @@ -2072,8 +2072,8 @@
    Return the $l_1$-norm of the matrix, that is $\|M\|_1=\max_{j}\sum_{i} |M_{ij}|$ (the maximum over all columns $j$ of the sums of absolute values over all rows $i$). This is the natural matrix norm that is compatible with the $l_1$-norm for vectors, i.e. $\|Mv\|_1\leq \|M\|_1 \|v\|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 418 of file petsc_matrix_base.cc.

    @@ -2101,8 +2101,8 @@
    Return the $l_\infty$-norm of the matrix, that is $\|M\|_\infty=\max_{i}\sum_{j} |M_{ij}|$ (the maximum over all rows $i$ of the sums of absolute values over all columns $j$). This is the natural matrix norm that is compatible with the $l_\infty$-norm of vectors, i.e. $\|Mv\|_\infty \leq \|M\|_\infty \|v\|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 431 of file petsc_matrix_base.cc.

    @@ -2158,7 +2158,7 @@
    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be square for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

    @@ -2189,7 +2189,7 @@
    Compute the matrix scalar product $\left(u,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors have to be distributed vectors as well. Conversely, if the matrix is not distributed, then neither of the vectors may be.

    @@ -2905,7 +2905,7 @@

    Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    Definition at line 644 of file petsc_matrix_base.cc.
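
    A short sketch of driving these operations, assuming the public mmult()/Tmmult() members of PETScWrappers::MPI::SparseMatrix, which forward to the base functions described here; C and CT need no pre-built sparsity pattern:

    #include <deal.II/lac/petsc_sparse_matrix.h>

    using namespace dealii;

    void multiply(const PETScWrappers::MPI::SparseMatrix &A,
                  const PETScWrappers::MPI::SparseMatrix &B,
                  PETScWrappers::MPI::SparseMatrix &C,
                  PETScWrappers::MPI::SparseMatrix &CT)
    {
      A.mmult(C, B);   // C  = A   * B
      A.Tmmult(CT, B); // CT = A^T * B
    }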

    @@ -2939,8 +2939,8 @@
    Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    Definition at line 652 of file petsc_matrix_base.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html 2024-11-15 06:44:20.075588028 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MPI_1_1Vector.html 2024-11-15 06:44:20.075588028 +0000 @@ -1941,7 +1941,7 @@
    Return the square of the $l_2$-norm.

    Definition at line 604 of file petsc_vector_base.cc.

    @@ -1997,7 +1997,7 @@
    $l_1$-norm of the vector. The sum of the absolute values.

    Note
    In complex-valued PETSc prior to 3.7.0, this norm is implemented as the sum of the absolute values of the real and imaginary parts of the elements of a complex vector.

    Definition at line 664 of file petsc_vector_base.cc.

    @@ -2026,7 +2026,7 @@
    $l_2$-norm of the vector. The square root of the sum of the squares of the elements.

    Definition at line 677 of file petsc_vector_base.cc.

    @@ -2054,7 +2054,7 @@
    $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

    Definition at line 690 of file petsc_vector_base.cc.

    @@ -2082,7 +2082,7 @@
    $l_\infty$-norm of the vector. Return the value of the vector element with the maximum absolute value.

    Definition at line 732 of file petsc_vector_base.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 2024-11-15 06:44:20.131588528 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixBase.html 2024-11-15 06:44:20.131588528 +0000 @@ -1303,8 +1303,8 @@
    Return the $l_1$-norm of the matrix, that is $\|M\|_1=\max_{j}\sum_{i} |M_{ij}|$ (the maximum over all columns $j$ of the sums of absolute values over all rows $i$). This is the natural matrix norm that is compatible with the $l_1$-norm for vectors, i.e. $\|Mv\|_1\leq \|M\|_1 \|v\|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 418 of file petsc_matrix_base.cc.

    @@ -1324,8 +1324,8 @@
    Return the $l_\infty$-norm of the matrix, that is $\|M\|_\infty=\max_{i}\sum_{j} |M_{ij}|$ (the maximum over all rows $i$ of the sums of absolute values over all columns $j$). This is the natural matrix norm that is compatible with the $l_\infty$-norm of vectors, i.e. $\|Mv\|_\infty \leq \|M\|_\infty \|v\|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 431 of file petsc_matrix_base.cc.

    @@ -1365,7 +1365,7 @@
    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be square for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

    @@ -1389,7 +1389,7 @@ const VectorBase & v

    Compute the matrix scalar product $\left(u,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors have to be distributed vectors as well. Conversely, if the matrix is not distributed, then neither of the vectors may be.
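
    In code, the two operations above might read as follows; M, u, and v are assumed to be set up compatibly (distributed if M is), and PetscScalar is real for a real PETSc build (a sketch):

    const PetscScalar vMv = M.matrix_norm_square(v);       // (v, Mv)
    const PetscScalar uMv = M.matrix_scalar_product(u, v); // (u, Mv)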

    @@ -1972,7 +1972,7 @@

    Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    Definition at line 644 of file petsc_matrix_base.cc.

    @@ -2006,8 +2006,8 @@
    Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    Definition at line 652 of file petsc_matrix_base.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 2024-11-15 06:44:20.191589064 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1MatrixFree.html 2024-11-15 06:44:20.191589064 +0000 @@ -1962,8 +1962,8 @@
    Return the $l_1$-norm of the matrix, that is $\|M\|_1=\max_{j}\sum_{i} |M_{ij}|$ (the maximum over all columns $j$ of the sums of absolute values over all rows $i$). This is the natural matrix norm that is compatible with the $l_1$-norm for vectors, i.e. $\|Mv\|_1\leq \|M\|_1 \|v\|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 418 of file petsc_matrix_base.cc.

    @@ -1991,8 +1991,8 @@
    Return the $l_\infty$-norm of the matrix, that is $\|M\|_\infty=\max_{i}\sum_{j} |M_{ij}|$ (the maximum over all rows $i$ of the sums of absolute values over all columns $j$). This is the natural matrix norm that is compatible with the $l_\infty$-norm of vectors, i.e. $\|Mv\|_\infty \leq \|M\|_\infty \|v\|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    Definition at line 431 of file petsc_matrix_base.cc.

    @@ -2048,7 +2048,7 @@
    Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be square for this operation.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

    @@ -2079,7 +2079,7 @@
    Compute the matrix scalar product $\left(u,Mv\right)$.

    The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

    Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors have to be distributed vectors as well. Conversely, if the matrix is not distributed, then neither of the vectors may be.

    @@ -2675,7 +2675,7 @@

    Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    Definition at line 644 of file petsc_matrix_base.cc.

    @@ -2709,8 +2709,8 @@
    Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

    The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

    Definition at line 652 of file petsc_matrix_base.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html 2024-11-15 06:44:20.219589314 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1NonlinearSolver.html 2024-11-15 06:44:20.219589314 +0000 @@ -194,7 +194,7 @@
    Mat & petsc_matrix();
    ...

    In particular, the supported types are the ones that can wrap PETSc's native vector and matrix classes, that are able to modify them in place, and that can return PETSc native types when requested.

    To use the solvers the user needs to provide the implementation of $F$ via the NonlinearSolver::residual callback.
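
    A minimal sketch of wiring up this callback; the residual signature is assumed from petsc_snes.h, and the quadratic system $F_i(x)=x_i^2-1$ is made up for illustration:

    #include <deal.II/lac/petsc_snes.h>
    #include <deal.II/lac/petsc_vector.h>

    using namespace dealii;
    using VectorType = PETScWrappers::MPI::Vector;

    PETScWrappers::NonlinearSolver<VectorType> solver;
    solver.residual = [](const VectorType &x, VectorType &res) {
      // res_i = x_i^2 - 1 on the locally-owned range
      for (const auto i : x.locally_owned_elements())
        res[i] = x[i] * x[i] - 1.0;
      res.compress(VectorOperation::insert);
    };
    // solver.solve(x); // x: initial guess in, solution out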

    The default linearization procedure of a solver instantiated with this class consists in using Jacobian-Free-Newton-Krylov; the action of tangent matrices inside a linear solver process are approximated via matrix-free finite-differencing of the nonlinear residual equations. For details, consult the PETSc manual.

    Users can also provide the implementations of the Jacobian. This can be accomplished in two ways:

    • PETSc style using NonlinearSolver::jacobian
    /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseDirectMUMPS.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseDirectMUMPS.html 2024-11-15 06:44:20.243589528 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseDirectMUMPS.html 2024-11-15 06:44:20.243589528 +0000 @@ -273,7 +273,7 @@

  If called with true as argument, tell the direct solver to assume that the system matrix is symmetric. It does so by computing the LDL^T decomposition (in effect, a Cholesky decomposition) instead of the more expensive LU decomposition. The argument indicates whether the matrix can be assumed to be symmetric or not.

  Note that most finite element matrices are "structurally symmetric", i.e., the sparsity pattern is symmetric, even though the matrix is not. An example of a matrix that is structurally symmetric but not symmetric is the matrix you obtain by discretizing the advection equation $\nabla \cdot (\vec\beta u) = f$ (see, for example step-12). Because the operator here is not symmetric, the matrix is not symmetric either; however, if matrix entry $A_{ij}$ is nonzero, then matrix entry $A_{ji}$ is generally not zero either (and in any case, DoFTools::make_sparsity_pattern() will create a symmetric sparsity pattern). That said, the current function is not meant to indicate whether the sparsity pattern is symmetric, but whether the matrix itself is symmetric, and this typically requires that the differential operator you are considering is symmetric.

      Definition at line 814 of file petsc_solver.cc.
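
  A sketch of the intended use; system_matrix, solution, and rhs are assumed to exist, and constructor arguments are abbreviated and may differ between versions (see petsc_solver.h):

  SolverControl solver_control(1, 0); // direct solver: a single "iteration"
  PETScWrappers::SparseDirectMUMPS solver(solver_control);
  solver.set_symmetric_mode(true); // the matrix itself, not just its
                                   // sparsity pattern, is symmetric
  solver.solve(system_matrix, solution, rhs); // uses LDL^T instead of LU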

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html 2024-11-15 06:44:20.303590064 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1SparseMatrix.html 2024-11-15 06:44:20.303590064 +0000 @@ -1952,8 +1952,8 @@
      Return the $l_1$-norm of the matrix, that is $\|M\|_1=\max_{j}\sum_{i} |M_{ij}|$ (the maximum over all columns $j$ of the sums of absolute values over all rows $i$). This is the natural matrix norm that is compatible with the $l_1$-norm for vectors, i.e. $\|Mv\|_1\leq \|M\|_1 \|v\|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      Definition at line 418 of file petsc_matrix_base.cc.

      @@ -1981,8 +1981,8 @@
      Return the $l_\infty$-norm of the matrix, that is $\|M\|_\infty=\max_{i}\sum_{j} |M_{ij}|$ (the maximum over all rows $i$ of the sums of absolute values over all columns $j$). This is the natural matrix norm that is compatible with the $l_\infty$-norm of vectors, i.e. $\|Mv\|_\infty \leq \|M\|_\infty \|v\|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

      Definition at line 431 of file petsc_matrix_base.cc.

      @@ -2038,7 +2038,7 @@
      Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

      Obviously, the matrix needs to be square for this operation.

      The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

      Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then the given vector has to be a distributed vector as well. Conversely, if the matrix is not distributed, then neither may the vector be.

      @@ -2069,7 +2069,7 @@
      Compute the matrix scalar product $\left(u,Mv\right)$.

      The implementation of this function is not as efficient as the one in the MatrixBase class used in deal.II (i.e. the original one, not the PETSc wrapper class) since PETSc doesn't support this operation and needs a temporary vector.

      Note that if the current object represents a parallel distributed matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors have to be distributed vectors as well. Conversely, if the matrix is not distributed, then neither of the vectors may be.

      @@ -2785,7 +2785,7 @@

      Base function to perform the matrix-matrix multiplication $C = AB$, or, if a vector $V$ whose size is compatible with B is given, $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

      This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      The content as well as the sparsity pattern of the matrix $C$ will be reset by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

      Definition at line 644 of file petsc_matrix_base.cc.

      @@ -2819,8 +2819,8 @@
      Base function to perform the matrix-matrix multiplication with the transpose of this, i.e., $C = A^T B$, or, if an optional vector $V$ whose size is compatible with $B$ is given, $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a diagonal matrix with the vector entries.

      This function assumes that the calling matrix $A$ and $B$ have compatible sizes. The size of $C$ will be set within this function.

      The content as well as the sparsity pattern of the matrix $C$ will be changed by this function, so make sure that the sparsity pattern is not used somewhere else in your program. This is an expensive operation, so think twice before you use this function.

      Definition at line 652 of file petsc_matrix_base.cc.

      /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html 2024-11-15 06:44:20.335590350 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1TimeStepper.html 2024-11-15 06:44:20.335590350 +0000 @@ -247,7 +247,7 @@
      Mat & petsc_matrix();
      ...

      In particular, the supported types are the ones that can wrap PETSc's native vector and matrix classes, that are able to modify them in place, and that can return PETSc native types when requested.

      To use explicit solvers (like for example explicit Runge-Kutta methods), the user only needs to provide the implementation of $G$ via the TimeStepper::explicit_function. For implicit solvers, users also have the alternative of providing the $F$ function via TimeStepper::implicit_function. IMEX methods are also supported by providing both callbacks.
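
      For instance, a sketch for the explicit branch; the callback signature is assumed from petsc_ts.h, and the decay ODE $\dot y = -y$ is illustrative:

      #include <deal.II/lac/petsc_ts.h>
      #include <deal.II/lac/petsc_vector.h>

      using namespace dealii;
      using VectorType = PETScWrappers::MPI::Vector;

      PETScWrappers::TimeStepper<VectorType> stepper;
      stepper.explicit_function =
        [](const double /*t*/, const VectorType &y, VectorType &res) {
          // G(t, y) = -y, evaluated on the locally-owned range
          for (const auto i : y.locally_owned_elements())
            res[i] = -y[i];
          res.compress(VectorOperation::insert);
        };
      // stepper.solve(y); // y: initial condition in, final state out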

      The default linearization procedure of an implicit solver instantiated with this class consists in using Jacobian-Free-Newton-Krylov; the action of tangent matrices inside a linear solver process are approximated via matrix-free finite-differencing of the nonlinear residual equations that are ODE-solver specific. For details, consult the PETSc manual.

      Users can also provide the implementations of the Jacobians. This can be accomplished in two ways:

      • PETSc style using TimeStepper::implicit_jacobian and TimeStepper::explicit_jacobian.
      @@ -703,7 +703,7 @@

      Callback for the computation of the implicit Jacobian $\dfrac{\partial F}{\partial y} + \alpha \dfrac{\partial F}{\partial \dot y}$.

      All implicit solver implementations are recast to use the above linearization. The $\alpha$ parameter is time-step and solver-type specific.

        Note
        This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions.

        Definition at line 501 of file petsc_ts.h.

        @@ -788,7 +788,7 @@

      Callback for the setup of the Jacobian system.

      This callback gives full control to users to set up the linearized equations $\dfrac{\partial F}{\partial y} + \alpha \dfrac{\partial F}{\partial \dot y}$.

      All implicit solver implementations are recast to use the above linearization. The $\alpha$ parameter is time-step and solver-type specific.

        Solvers must be provided via TimeStepper::solve_with_jacobian.

        Note
        This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions.
        /usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html 2024-11-15 06:44:20.383590779 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPETScWrappers_1_1VectorBase.html 2024-11-15 06:44:20.383590779 +0000 @@ -1169,7 +1169,7 @@
        Return the square of the $l_2$-norm.

        Definition at line 604 of file petsc_vector_base.cc.

        @@ -1209,7 +1209,7 @@
        $l_1$-norm of the vector. The sum of the absolute values.

        Note
        In complex-valued PETSc prior to 3.7.0, this norm is implemented as the sum of the absolute values of the real and imaginary parts of the elements of a complex vector.

        Definition at line 664 of file petsc_vector_base.cc.

        @@ -1230,7 +1230,7 @@
        $l_2$-norm of the vector. The square root of the sum of the squares of the elements.

        Definition at line 677 of file petsc_vector_base.cc.

        @@ -1250,7 +1250,7 @@
        $l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

        Definition at line 690 of file petsc_vector_base.cc.

        @@ -1270,7 +1270,7 @@
        $l_\infty$-norm of the vector. Return the value of the vector element with the maximum absolute value.

        Definition at line 732 of file petsc_vector_base.cc.

        /usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html 2024-11-15 06:44:20.503591851 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1ParticleHandler.html 2024-11-15 06:44:20.507591886 +0000 @@ -1057,7 +1057,7 @@ const typename Triangulation< dim, spacedim >::active_cell_iterator & cell

        Insert a particle into the collection of particles. Return an iterator to the new position of the particle. This function involves a copy of the particle and its properties. Note that this function is of $O(N \log N)$ complexity for $N$ particles.

        Definition at line 578 of file particle_handler.cc.
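
        A minimal usage sketch of the documented call, on a single unit cell where real and reference coordinates coincide (the names tria, mapping, and handler are ours):

        #include <deal.II/fe/mapping_q.h>
        #include <deal.II/grid/grid_generator.h>
        #include <deal.II/grid/tria.h>
        #include <deal.II/particles/particle.h>
        #include <deal.II/particles/particle_handler.h>

        int main()
        {
          using namespace dealii;

          Triangulation<2> tria;
          GridGenerator::hyper_cube(tria); // single cell [0,1]^2
          const MappingQ<2> mapping(1);

          Particles::ParticleHandler<2> handler(tria, mapping);

          Particles::Particle<2> p;
          p.set_location(Point<2>(0.25, 0.25));
          p.set_reference_location(Point<2>(0.25, 0.25));

          // Copies the particle into the handler; returns an iterator to the copy.
          auto it = handler.insert_particle(p, tria.begin_active());
          (void)it;
        }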

        /usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html 2024-11-15 06:44:20.535592136 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classParticles_1_1PropertyPool.html 2024-11-15 06:44:20.535592136 +0000 @@ -677,7 +677,7 @@
        This function makes sure that all internally stored memory blocks are sorted in the same order as one would loop over the handles_to_sort container. This makes sure memory access is contiguous with actual memory location. Because the ordering is given in the input argument the complexity of this function is $O(N)$ where $N$ is the number of elements in the input argument.

        Definition at line 191 of file property_pool.cc.

        /usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html 2024-11-15 06:44:20.695593565 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPersistentTriangulation.html 2024-11-15 06:44:20.695593565 +0000 @@ -2260,7 +2260,7 @@
        Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

        The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

        Note
        This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
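
        For illustration, a minimal sketch of the growth factor described above, using a plain Triangulation:

        #include <deal.II/grid/grid_generator.h>
        #include <deal.II/grid/tria.h>
        #include <iostream>

        int main()
        {
          using namespace dealii;

          Triangulation<2> tria;
          GridGenerator::hyper_cube(tria); // one cell
          tria.refine_global(3);           // (2^2)^3 = 64 cells

          std::cout << tria.n_active_cells() << '\n'; // prints 64
        }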
        @@ -6984,7 +6984,7 @@
        Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e., for $dim>2$, no level argument must be given.

        Note
        The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
        /usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html 2024-11-15 06:44:20.747594030 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPoint.html 2024-11-15 06:44:20.751594065 +0000 @@ -879,7 +879,7 @@
        Return the Euclidean distance of this point to the point p, i.e. the $l_2$ norm of the difference between the vectors representing the two points.

        Note
        This function can also be used in device code.
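
        A small sketch of the documented behavior (distance_square is the squared variant that avoids the square root):

        #include <deal.II/base/point.h>
        #include <iostream>

        int main()
        {
          using namespace dealii;

          const Point<2> p1(0., 0.);
          const Point<2> p2(3., 4.);

          std::cout << p1.distance(p2)        << '\n'  // 5, the l_2 norm of p2 - p1
                    << p1.distance_square(p2) << '\n'; // 25
        }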
        @@ -1491,7 +1491,7 @@
        Return an unrolled index in the range $[0,\text{dim}^{\text{rank}}-1]$ for the element of the tensor indexed by the argument to the function.

        @@ -1517,7 +1517,7 @@
        Opposite of component_to_unrolled_index: For an index in the range $[0, \text{dim}^{\text{rank}}-1]$, return which set of indices it would correspond to.
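
        A round-trip sketch of the two functions above; for a rank-2 tensor the unrolled index is i*dim + j:

        #include <deal.II/base/tensor.h>
        #include <iostream>

        int main()
        {
          using namespace dealii;
          constexpr int dim = 3;

          const TableIndices<2> ij(1, 2);
          const unsigned int    k = Tensor<2, dim>::component_to_unrolled_index(ij);
          std::cout << k << '\n'; // 5 = 1*3 + 2

          const TableIndices<2> back = Tensor<2, dim>::unrolled_to_component_indices(k);
          std::cout << back[0] << ' ' << back[1] << '\n'; // 1 2
        }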

        @@ -2168,11 +2168,11 @@

        Entrywise multiplication of two tensor objects of general rank.

        This multiplication is also called "Hadamard-product" (c.f. https://en.wikipedia.org/wiki/Hadamard_product_(matrices)), and generates a new tensor of size <rank, dim>:

\[
   \text{result}_{i, j}
   = \text{left}_{i, j}\circ
     \text{right}_{i, j}
\]

        Template Parameters
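
        For illustration, a sketch of the entrywise product using deal.II's schur_product:

        #include <deal.II/base/tensor.h>
        #include <iostream>

        int main()
        {
          using namespace dealii;

          Tensor<2, 2> a, b;
          a[0][0] = 1; a[0][1] = 2; a[1][0] = 3; a[1][1] = 4;
          b[0][0] = 5; b[0][1] = 6; b[1][0] = 7; b[1][1] = 8;

          const Tensor<2, 2> c = schur_product(a, b); // c_ij = a_ij * b_ij
          std::cout << c << '\n';                     // 5 12 21 32
        }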
        @@ -2207,17 +2207,17 @@
        The dot product (single contraction) for tensors. This function returns a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

\[
   \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
   = \sum_{k}
     \text{left}_{i_1,\ldots,i_{r1}, k}
     \text{right}_{k, j_1,\ldots,j_{r2}}
\]

        Note
        For the Tensor class, the multiplication operator only performs a contraction over a single pair of indices. This is in contrast to the multiplication operator for SymmetricTensor, for which the corresponding operator*() performs a double contraction. The origin of the difference in how operator*() is implemented between Tensor and SymmetricTensor is that for the former, the product between two Tensor objects of same rank and dimension results in another Tensor object – that it, operator*() corresponds to the multiplicative group action within the group of tensors. On the other hand, there is no corresponding multiplicative group action with the set of symmetric tensors because, in general, the product of two symmetric tensors is a nonsymmetric tensor. As a consequence, for a mathematician, it is clear that operator*() for symmetric tensors must have a different meaning: namely the dot or scalar product that maps two symmetric tensors of rank 2 to a scalar. This corresponds to the double-dot (colon) operator whose meaning is then extended to the product of any two even-ranked symmetric tensors.
In case the contraction yields a tensor of rank 0, that is, if rank_1==rank_2==1, then a scalar number is returned as an unwrapped number type.

Return the $l_1$ norm of the given rank-2 tensor, where $\|\mathbf T\|_1 = \max_j \sum_i |T_{ij}|$ (maximum of the sums over columns).

    Definition at line 3039 of file tensor.h.
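
    A sketch of the single contraction and of the rank-0 special case described in the note:

    #include <deal.II/base/tensor.h>
    #include <iostream>

    int main()
    {
      using namespace dealii;

      Tensor<2, 2> A;
      A[0][0] = 1; A[0][1] = 2; A[1][0] = 3; A[1][1] = 4;
      Tensor<1, 2> x;
      x[0] = 1; x[1] = 1;

      const Tensor<1, 2> y = A * x; // contraction of A's last index with x
      std::cout << y << '\n';       // 3 7

      const double s = x * x;       // rank_1 == rank_2 == 1: plain scalar
      std::cout << s << '\n';       // 2
    }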

    @@ -2245,7 +2245,7 @@
    Return the $l_\infty$ norm of the given rank-2 tensor, where $\|\mathbf T\|_\infty = \max_i \sum_j |T_{ij}|$ (maximum of the sums over rows).

    Definition at line 3065 of file tensor.h.
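
    The two matrix norms side by side, as a small sketch:

    #include <deal.II/base/tensor.h>
    #include <iostream>

    int main()
    {
      using namespace dealii;

      Tensor<2, 2> t;
      t[0][0] = 1; t[0][1] = -2;
      t[1][0] = 3; t[1][1] =  4;

      std::cout << l1_norm(t)     << '\n'  // 6: largest column sum, |-2| + |4|
                << linfty_norm(t) << '\n'; // 7: largest row sum, |3| + |4|
    }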

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html 2024-11-15 06:44:20.787594387 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolarManifold.html 2024-11-15 06:44:20.787594387 +0000 @@ -458,7 +458,7 @@
    Given a point in the spacedim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the polar coordinate system to the Euclidean coordinate system. In other words, it is a matrix of size $\text{spacedim}\times\text{spacedim}$.

    This function is used in the computations required by the get_tangent_vector() function.

    Refer to the general documentation of this class for more information.

    @@ -716,7 +716,7 @@
    Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

    @@ -748,24 +748,24 @@
    Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

    For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
   \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
  \\          &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                             -F^{-1}(\mathbf x_1)\right]
\end{align*}

    In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
   \mathbf s(t) &= F(\zeta(t))
  \\          &= F(\xi_1 +  t (\xi_2-\xi_1))
  \\          &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                     -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

    What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
   \mathbf s'(0) &=
     \frac{d}{dt}\left. F\left(F^{-1}(\mathbf x_1)
                        + t\left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right]\right)\right|_{t=0}
 \\ &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                    \left[F^{-1}(\mathbf x_2)
                                 -F^{-1}(\mathbf x_1)\right].
\end{align*}

    This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

    Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
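
    A concrete sketch for PolarManifold: geodesics are images of straight lines in $(r,\theta)$ space, so between two points on the unit circle the geodesic is the circular arc, and the tangent at x1 points along the circle (here approximately $(0, \pi/2)$):

    #include <deal.II/grid/manifold_lib.h>
    #include <iostream>

    int main()
    {
      using namespace dealii;

      PolarManifold<2> manifold; // polar coordinates about the origin
      const Point<2> x1(1., 0.), x2(0., 1.);

      const Tensor<1, 2> t = manifold.get_tangent_vector(x1, x2);
      std::cout << t << '\n';
    }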
    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html 2024-11-15 06:44:20.811594601 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernardiRaugel.html 2024-11-15 06:44:20.811594601 +0000 @@ -156,7 +156,7 @@
    template<int dim>
    class PolynomialsBernardiRaugel< dim >

    This class implements the Bernardi-Raugel polynomials similarly to the description in the Mathematics of Computation paper from 1985 by Christine Bernardi and Geneviève Raugel.

    The Bernardi-Raugel polynomials are originally defined as an enrichment of the $(P_1)^d$ elements on simplicial meshes for Stokes problems by the addition of bubble functions, yielding a locking-free finite element which is a subset of $(P_2)^d$ elements. This implementation is an enrichment of $(Q_1)^d$ elements which is a subset of $(Q_2)^d$ elements for quadrilateral and hexahedral meshes.

    The $BR_1$ bubble functions are defined to have magnitude 1 at the center of face $e_i$ and direction $\mathbf{n}_i$ normal to face $e_i$, and magnitude 0 on all other vertices and faces. Ordering is consistent with the face numbering in GeometryInfo. The vector $\mathbf{n}_i$ points in the positive axis direction and not necessarily normal to the element for consistent orientation across edges.

    2d bubble functions (in order)

    $x=0$ edge: $\mathbf{p}_1 = \mathbf{n}_1 (1-x)(y)(1-y)$

    $x=1$ edge: $\mathbf{p}_2 = \mathbf{n}_2 (x)(y)(1-y)$
    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html differs (HTML document, ASCII text, with very long lines)
    --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html	2024-11-15 06:44:20.843594887 +0000
    +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomialsBernstein.html	2024-11-15 06:44:20.843594887 +0000
    @@ -1245,7 +1245,7 @@
       
     
If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html 2024-11-15 06:44:20.875595173 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteInterpolation.html 2024-11-15 06:44:20.875595173 +0000 @@ -1197,7 +1197,7 @@
    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 2024-11-15 06:44:20.911595494 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1HermiteLikeInterpolation.html 2024-11-15 06:44:20.911595494 +0000 @@ -1213,7 +1213,7 @@
    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html 2024-11-15 06:44:20.943595781 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Hierarchical.html 2024-11-15 06:44:20.947595816 +0000 @@ -1311,7 +1311,7 @@
    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html 2024-11-15 06:44:20.979596102 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1LagrangeEquidistant.html 2024-11-15 06:44:20.979596102 +0000 @@ -1226,7 +1226,7 @@
    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html 2024-11-15 06:44:21.015596424 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Legendre.html 2024-11-15 06:44:21.015596424 +0000 @@ -1187,7 +1187,7 @@
    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html 2024-11-15 06:44:21.047596709 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Lobatto.html 2024-11-15 06:44:21.047596709 +0000 @@ -232,7 +232,7 @@

    Detailed Description

    Lobatto polynomials of arbitrary degree on [0,1].

    These polynomials are the integrated Legendre polynomials on [0,1]. The first two polynomials are the standard linear shape functions given by $l_0(x) = 1-x$ and $l_1(x) = x$. For $i\geq2$ we use the definition $l_i(x) = \frac{1}{\Vert L_{i-1}\Vert_2}\int_0^x L_{i-1}(t)\,dt$, where $L_i$ denotes the $i$-th Legendre polynomial on $[0,1]$. The Lobatto polynomials $l_0,\ldots,l_k$ form a complete basis of the polynomials space of degree $k$.

    Calling the constructor with a given index k will generate the polynomial with index k. Only for $k\geq 1$ does the index equal the degree of the polynomial; for k==0 a polynomial of degree 1 is generated as well.

    These polynomials are used for the construction of the shape functions of Nédélec elements of arbitrary order.
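
    A minimal sketch of the first two members of the family described above:

    #include <deal.II/base/polynomial.h>
    #include <iostream>

    int main()
    {
      using namespace dealii;

      Polynomials::Lobatto l0(0), l1(1);
      std::cout << l0.value(0.3) << '\n'  // l_0(x) = 1 - x, prints 0.7
                << l1.value(0.3) << '\n'; // l_1(x) = x,     prints 0.3
    }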

    @@ -1217,7 +1217,7 @@
    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html 2024-11-15 06:44:21.083597031 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Monomial.html 2024-11-15 06:44:21.083597031 +0000 @@ -1290,7 +1290,7 @@
    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html 2024-11-15 06:44:21.115597317 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1Polynomial.html 2024-11-15 06:44:21.115597317 +0000 @@ -1217,7 +1217,7 @@
    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html 2024-11-15 06:44:21.147597602 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPolynomials_1_1PolynomialsHermite.html 2024-11-15 06:44:21.147597602 +0000 @@ -239,8 +239,8 @@  

    Detailed Description

    This class implements Hermite interpolation polynomials (see [CiarletRiavart1972interpolation]) enforcing the maximum possible level of regularity $r$ in the FEM basis given a polynomial degree of $2r+1$. The polynomials all represent either a non-zero shape value or derivative at $x=0$ and $x=1$ on the reference interval $x \in [0,1]$.

    Indices $j = 0, 1, \dots, r$ refer to polynomials corresponding to a non-zero derivative (or shape value for $j=0$) of order $j$ at $x=0$, and indices $j = r+1, r+2, \dots, 2r+1$ refer to polynomials with a non-zero derivative of order $j-(r+1)$ (or value for $j=r+1$) at $x=1$. In particular, the $0^{th}$ function has a value of $1$ at $x=0$, and the $(r+1)^{th}$ function has a value of $1$ at $x=1$. The basis is rescaled such that a function corresponding to a non-zero $j^{th}$ derivative has derivative value $j! 4^{j}$ at the corresponding node. This is done to prevent the $L^{2}$-norm of the basis functions from reducing exponentially with the chosen regularity.

    Definition at line 60 of file polynomials_hermite.h.

    Member Typedef Documentation

    @@ -310,8 +310,8 @@

    Constructor for an individual Hermite polynomial. We write $f_{j}$ for a polynomial that has a non-zero $j^{th}$ derivative at $x=0$ and $g_{j}$ for a polynomial with a non-zero $j^{th}$ derivative at $x=1$, meaning $f_{j}$ will have index $=j$ and $g_{j}$ will have index $= j + \mathtt{regularity} + 1$. The resulting polynomials will be degree $2\times \mathtt{regularity} +1$ and obey the following conditions:

\begin{align*}
 &\begin{matrix}
   \left. \frac{d^{i}}{dx^{i}} f_{j}(x) \right\vert_{x=0}
          = i! 4^{i} \delta_{i, j}, \hfill
          &\qquad \hfill 0 \leq i \leq \mathtt{regularity},
 \end{matrix} \\
 &\begin{matrix}
   \left. \frac{d^{i}}{dx^{i}} g_{j}(x) \right\vert_{x=1}
          = i! 4^{i} \delta_{i, j}, \hfill
          &\qquad \hfill 0 \leq i \leq \mathtt{regularity},
 \end{matrix} \qquad 0 \leq j \leq \mathtt{regularity},
\end{align*}

    where $\delta_{i,j}$ is equal to $1$ whenever $i=j$, and equal to $0$ otherwise. These polynomials have explicit formulas given by

\begin{align*}
   f_{j}(x) &= 4^{j} x^{j} (1-x)^{\mathtt{regularity}+1}
 \sum_{k=0}^{\mathtt{regularity} - j} \;^{\mathtt{regularity} + k} C_{k}
 x^{k}, \\ g_{j}(x) &= 4^{j} x^{\mathtt{regularity}+1} (x-1)^{j}
 \sum_{k=0}^{\mathtt{regularity} - j} \;^{\mathtt{regularity} + k} C_{k}
 (1-x)^{k},
\end{align*}

    where $^{n} C_{r} = \frac{n!}{r!(n-r)!}$ is the $r^{th}$ binomial coefficient of degree $n, \; 0 \leq r \leq n$.

    Parameters
    @@ -373,7 +373,7 @@
    regularity: The highest derivative for which the basis is used to enforce regularity.
    This function generates a vector of Polynomial objects representing a complete basis of degree $2\times\mathtt{regularity} +1$ on the reference interval $[0,1]$.

    Parameters
    @@ -1210,7 +1210,7 @@
    regularity: The generated basis can be used to strongly enforce continuity in all derivatives up to and including this order.
    The order of the highest derivative in which the Hermite basis can be used to impose continuity across element boundaries. It's related to the degree $p$ by $p = 2 \times\mathtt{regularity} +1$.

    Definition at line 131 of file polynomials_hermite.h.
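
    For illustration, a hypothetical sketch of generating the basis described above, assuming the static generate_complete_basis() interface shown in polynomials_hermite.h:

    #include <deal.II/base/polynomials_hermite.h>
    #include <iostream>

    int main()
    {
      using namespace dealii;

      // regularity = 1, so the basis has degree 2*1+1 = 3 and 4 members.
      const auto basis =
        Polynomials::PolynomialsHermite::generate_complete_basis(1);

      std::cout << basis.size()        << '\n'  // 4
                << basis[0].value(0.0) << '\n'; // f_0(0) = 1
    }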

    @@ -1260,7 +1260,7 @@
    This stores whether the shape function corresponds to a non-zero value or derivative at $x=0$ on the reference interval ( $\mathtt{side} =0$) or at $x=1$ ( $\mathtt{side} =1$).

    Definition at line 144 of file polynomials_hermite.h.

    @@ -1336,7 +1336,7 @@
    If the polynomial is in Lagrange product form, i.e., constructed as a product $(x-x_0) (x-x_1) \ldots (x-x_n)/c$, store the shifts $x_i$.

    Definition at line 317 of file polynomial.h.

/usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRelaxation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRelaxation.html 2024-11-15 06:44:21.175597853 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classPreconditionRelaxation.html 2024-11-15 06:44:21.175597853 +0000 @@ -226,7 +226,7 @@ x^{n+1} = x^{n} + \alpha P^{-1} (b-Ax^n). \]

    The relaxation parameter $\alpha$ has to be in the range:

\[
  0 < \alpha < \frac{2}{\lambda_{\max}(P^{-1}A)}.
\]
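
    A small sketch of one such relaxation step with $P$ the diagonal of $A$, using PreconditionJacobi (a concrete relaxation preconditioner; the 3x3 test matrix is ours):

    #include <deal.II/lac/dynamic_sparsity_pattern.h>
    #include <deal.II/lac/precondition.h>
    #include <deal.II/lac/sparse_matrix.h>
    #include <deal.II/lac/sparsity_pattern.h>
    #include <deal.II/lac/vector.h>

    int main()
    {
      using namespace dealii;

      // Diagonal test matrix A = diag(2, 2, 2), so P = diag(A) = A.
      DynamicSparsityPattern dsp(3, 3);
      for (unsigned int i = 0; i < 3; ++i)
        dsp.add(i, i);
      SparsityPattern sp;
      sp.copy_from(dsp);
      SparseMatrix<double> A(sp);
      for (unsigned int i = 0; i < 3; ++i)
        A.set(i, i, 2.0);

      // vmult applies x = alpha P^{-1} b with relaxation alpha = 0.8.
      PreconditionJacobi<SparseMatrix<double>> prec;
      prec.initialize(
        A, PreconditionJacobi<SparseMatrix<double>>::AdditionalData(0.8));

      Vector<double> b(3), x(3);
      b = 1.0;
      prec.vmult(x, b); // each entry becomes 0.8 * (1/2) * 1 = 0.4
    }
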
/usr/share/doc/packages/dealii/doxygen/deal.II/classQDuffy.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQDuffy.html 2024-11-15 06:44:21.195598032 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQDuffy.html 2024-11-15 06:44:21.195598032 +0000 @@ -235,8 +235,8 @@ \[ x = v_0 + B \hat x \]

    where the matrix $B$ is given by $B_{ij} = v[j][i]-v[0][i]$.

    The weights are scaled with the absolute value of the determinant of $B$, that is $J \dealcoloneq |\text{det}(B)|$. If $J$ is zero, an empty quadrature is returned. This may happen, in two dimensions, if the three vertices are aligned, or in three dimensions if the four vertices are on the same plane. The present function works also in the codimension one and codimension two case. For instance, when dim=2 and spacedim=3, we can map the quadrature points so that they live on the physical triangle embedded in the three dimensional space. In such a case, the matrix $B$ is not square anymore.

    Parameters
    [in] vertices: The vertices of the simplex you wish to integrate on
/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html 2024-11-15 06:44:21.207598138 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussChebyshev.html 2024-11-15 06:44:21.207598138 +0000 @@ -122,7 +122,7 @@
     QGaussChebyshev (const unsigned int n)

    Detailed Description

    template<int dim>
    class QGaussChebyshev< dim >

    Gauss-Chebyshev quadrature rules integrate the weighted product $\int_{-1}^1 f(x) w(x) dx$ with weight given by: $w(x) = 1/\sqrt{1-x^2}$. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-1$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$. So the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight: $w(x) = 1/\sqrt{x(1-x)}$. For details see: M. Abramowitz & I.A. Stegun: Handbook of Mathematical Functions, par. 25.4.38

    Definition at line 558 of file quadrature_lib.h.
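
    A sketch of the rule in action: $\int_0^1 x/\sqrt{x(1-x)}\,dx = \pi/2$, and since $f(x)=x$ is a monomial of degree $1 \le 2n-1$ the result is exact up to roundoff:

    #include <deal.II/base/numbers.h>
    #include <deal.II/base/quadrature_lib.h>
    #include <iostream>

    int main()
    {
      using namespace dealii;

      QGaussChebyshev<1> quad(5);

      double sum = 0;
      for (unsigned int q = 0; q < quad.size(); ++q)
        sum += quad.weight(q) * quad.point(q)[0]; // f(x) = x; w(x) is built in

      std::cout << sum << " vs " << numbers::PI / 2 << '\n';
    }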

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html 2024-11-15 06:44:21.223598281 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobatto.html 2024-11-15 06:44:21.223598281 +0000 @@ -123,7 +123,7 @@ class QGaussLobatto< dim >

    The Gauss-Lobatto family of quadrature rules for numerical integration.

    This modification of the Gauss quadrature uses the two interval end points as well. Being exact for polynomials of degree 2n-3, this formula is suboptimal by two degrees.

    The quadrature points are interval end points plus the roots of the derivative of the Legendre polynomial $P_{n-1}$ of degree $n-1$. The quadrature weights are $2/(n(n-1)[P_{n-1}(x_i)]^2)$.

    Note
    This implementation has not been optimized concerning numerical stability and efficiency. It can be easily adapted to the general case of Gauss-Lobatto-Jacobi-Bouzitat quadrature with arbitrary parameters $\alpha$, $\beta$, of which the Gauss-Lobatto-Legendre quadrature ( $\alpha = \beta = 0$) is a special case.
    See also
    http://en.wikipedia.org/wiki/Handbook_of_Mathematical_Functions
    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html 2024-11-15 06:44:21.239598424 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLobattoChebyshev.html 2024-11-15 06:44:21.239598424 +0000 @@ -122,7 +122,7 @@
     QGaussLobattoChebyshev (const unsigned int n)

    Detailed Description

    template<int dim>
    class QGaussLobattoChebyshev< dim >

    Gauss-Lobatto-Chebyshev quadrature rules integrate the weighted product $\int_{-1}^1 f(x) w(x) dx$ with weight given by: $w(x) = 1/\sqrt{1-x^2}$, with the additional constraint that two of the quadrature points are located at the endpoints of the quadrature interval. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-3$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$. So the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight: $w(x) = 1/\sqrt{x(1-x)}$. For details see: M. Abramowitz & I.A. Stegun: Handbook of Mathematical Functions, par. 25.4.40

    Definition at line 627 of file quadrature_lib.h.

    Constructor & Destructor Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html 2024-11-15 06:44:21.255598567 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLog.html 2024-11-15 06:44:21.255598567 +0000 @@ -132,8 +132,8 @@
    static std::vector< double > get_quadrature_weights (const unsigned int n)

    Detailed Description

    template<int dim>
    class QGaussLog< dim >

    A class for Gauss quadrature with logarithmic weighting function. This formula is used to integrate $\ln|x|\;f(x)$ on the interval $[0,1]$, where $f$ is a smooth function without singularities. The collection of quadrature points and weights has been obtained using Numerical Recipes.

    Notice that only the function $f(x)$ should be provided, i.e., $\int_0^1 f(x) \ln|x| dx = \sum_{i=0}^N w_i f(q_i)$. Setting the revert flag to true at construction time switches the weight from $\ln|x|$ to $\ln|1-x|$.

    The weights and functions have been tabulated up to order 12.
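
    A sketch of the convention that only $f(x)$ is supplied; with $f(x)=1$ the exact value is $\int_0^1 \ln|x|\,dx = -1$:

    #include <deal.II/base/quadrature_lib.h>
    #include <iostream>

    int main()
    {
      using namespace dealii;

      QGaussLog<1> quad(4);

      double sum = 0;
      for (unsigned int q = 0; q < quad.size(); ++q)
        sum += quad.weight(q) * 1.0; // f(q_i) = 1; ln|x| lives in the weights

      std::cout << sum << '\n'; // close to -1
    }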

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html 2024-11-15 06:44:21.271598710 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussLogR.html 2024-11-15 06:44:21.271598710 +0000 @@ -128,15 +128,15 @@

    Detailed Description

    template<int dim>
    class QGaussLogR< dim >

    A class for Gauss quadrature with arbitrary logarithmic weighting function. This formula is used to integrate $\ln(|x-x_0|/\alpha)\;f(x)$ on the interval $[0,1]$, where $f$ is a smooth function without singularities, and $x_0$ and $\alpha$ are given at construction time, and are the location of the singularity $x_0$ and an arbitrary scaling factor in the singularity.

    You have to make sure that the point $x_0$ is not one of the Gauss quadrature points of order $N$, otherwise an exception is thrown, since the quadrature weights cannot be computed correctly.

    This quadrature formula is rather expensive, since it uses internally two Gauss quadrature formulas of order n to integrate the nonsingular part of the factor, and two GaussLog quadrature formulas to integrate on the separate segments $[0,x_0]$ and $[x_0,1]$. If the singularity is one of the extremes and the factor alpha is 1, then this quadrature is the same as QGaussLog.

    The last argument from the constructor allows you to use this quadrature rule in one of two possible ways:

    \[ \int_0^1 g(x) dx = \int_0^1 f(x)
 \ln\left(\frac{|x-x_0|}{\alpha}\right) dx = \sum_{i=0}^N w_i g(q_i) =
 \sum_{i=0}^N \bar{w}_i f(q_i) \]

    Which one of the two sets of weights is provided can be selected by the factor_out_singular_weight parameter. If it is false (the default), then the $\bar{w}_i$ weights are computed, and you should provide only the smooth function $f(x)$, since the singularity is included inside the quadrature. If the parameter is set to true, then the singularity is factored out of the quadrature formula, and you should provide a function $g(x)$, which should at least be similar to $\ln(|x-x_0|/\alpha)$.

    Notice that this quadrature rule is worthless if you try to use it for regular functions once you factored out the singularity.

    The weights and functions have been tabulated up to order 12.

/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html 2024-11-15 06:44:21.291598888 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussOneOverR.html 2024-11-15 06:44:21.291598888 +0000 @@ -132,9 +132,9 @@ static unsigned int quad_size (const Point< dim > &singularity, const unsigned int n)

    Detailed Description

    template<int dim>
    class QGaussOneOverR< dim >

    A class for Gauss quadrature with $1/R$ weighting function. This formula can be used to integrate $1/R \ f(x)$ on the reference element $[0,1]^2$, where $f$ is a smooth function without singularities, and $R$ is the distance from the point $x$ to the vertex $\xi$, given at construction time by specifying its index. Notice that this distance is evaluated in the reference element.

    This quadrature formula is obtained from two QGauss quadrature formulas, upon transforming them into polar coordinate system centered at the singularity, and then again into another reference element. This allows for the singularity to be cancelled by part of the Jacobian of the transformation, which contains $R$. In practice the reference element is transformed into a triangle by collapsing one of the sides adjacent to the singularity. The Jacobian of this transformation contains $R$, which is removed before scaling the original quadrature, and this process is repeated for the next half element.

    Upon construction it is possible to specify whether we want the singularity removed, or not. In other words, this quadrature can be used to integrate $g(x) = 1/R\ f(x)$, or simply $f(x)$, with the $1/R$ factor already included in the quadrature weights.

    Definition at line 356 of file quadrature_lib.h.

    Constructor & Destructor Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html 2024-11-15 06:44:21.307599031 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussRadauChebyshev.html 2024-11-15 06:44:21.307599031 +0000 @@ -139,7 +139,7 @@

    Detailed Description

    template<int dim>
    class QGaussRadauChebyshev< dim >

    Gauss-Radau-Chebyshev quadrature rules integrate the weighted product $\int_{-1}^1 f(x) w(x) dx$ with weight given by: $w(x) = 1/\sqrt{1-x^2}$ with the additional constraint that a quadrature point lies at one of the two extrema of the interval. The nodes and weights are known analytically, and are exact for monomials up to the order $2n-2$, where $n$ is the number of quadrature points. Here we rescale the quadrature formula so that it is defined on the interval $[0,1]$ instead of $[-1,1]$. So the quadrature formulas integrate exactly the integral $\int_0^1 f(x) w(x) dx$ with the weight: $w(x) = 1/\sqrt{x(1-x)}$. By default the quadrature is constructed with the left endpoint as quadrature node, but the quadrature node can be imposed at the right endpoint through the variable ep that can assume the values left or right.

    Definition at line 581 of file quadrature_lib.h.

    Member Enumeration Documentation

/usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussSimplex.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussSimplex.html 2024-11-15 06:44:21.323599175 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQGaussSimplex.html 2024-11-15 06:44:21.323599175 +0000 @@ -201,8 +201,8 @@ \[ x = v_0 + B \hat x \]

    where the matrix $B$ is given by $B_{ij} = v[j][i]-v[0][i]$.

    The weights are scaled with the absolute value of the determinant of $B$, that is $J \dealcoloneq |\text{det}(B)|$. If $J$ is zero, an empty quadrature is returned. This may happen, in two dimensions, if the three vertices are aligned, or in three dimensions if the four vertices are on the same plane. The present function works also in the codimension one and codimension two case. For instance, when dim=2 and spacedim=3, we can map the quadrature points so that they live on the physical triangle embedded in the three dimensional space. In such a case, the matrix $B$ is not square anymore.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html 2024-11-15 06:44:21.347599389 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQR.html 2024-11-15 06:44:21.347599389 +0000 @@ -315,7 +315,7 @@

    Remove first column and update QR factorization.

    Starting from the given QR decomposition $QR= A = [a_1\,\dots a_n], \quad a_i \in {\mathbb R}^m$ we aim at computing factorization of $\tilde Q \tilde R= \tilde A = [a_2\,\dots a_n], \quad a_i \in {\mathbb R}^m$.

    The standard approach is to partition $R$ as

    \[
 R =
 \begin{bmatrix}
@@ -368,7 +368,7 @@

    Set $y = Qx$. The size of $x$ should be consistent with the size of the R matrix.

    Implements BaseQR< VectorType >.

    @@ -402,7 +402,7 @@
    Set $y = Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implements BaseQR< VectorType >.

    @@ -436,7 +436,7 @@
    Set $y = QRx$. The size of $x$ should be consistent with the size of the R matrix.

    Implements BaseQR< VectorType >.

    @@ -470,7 +470,7 @@
    Set $y = R^T Q^Tx$. The size of $x$ should be consistent with the size of column vectors.

    Implements BaseQR< VectorType >.

/usr/share/doc/packages/dealii/doxygen/deal.II/classQSimplex.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQSimplex.html 2024-11-15 06:44:21.363599532 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQSimplex.html 2024-11-15 06:44:21.363599532 +0000 @@ -187,8 +187,8 @@ \[ x = v_0 + B \hat x \]

    where the matrix $B$ is given by $B_{ij} = v[j][i]-v[0][i]$.

    The weights are scaled with the absolute value of the determinant of $B$, that is $J \dealcoloneq |\text{det}(B)|$. If $J$ is zero, an empty quadrature is returned. This may happen, in two dimensions, if the three vertices are aligned, or in three dimensions if the four vertices are on the same plane. The present function works also in the codimension one and codimension two case. For instance, when dim=2 and spacedim=3, we can map the quadrature points so that they live on the physical triangle embedded in the three dimensional space. In such a case, the matrix $B$ is not square anymore.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html 2024-11-15 06:44:21.379599675 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQTelles.html 2024-11-15 06:44:21.379599675 +0000 @@ -148,7 +148,7 @@

    Since the library assumes $[0,1]$ as the reference interval, we map these values onto the proper reference interval in the implementation.

    This variable change can be used to integrate singular integrals. One example is $f(x)/|x-x_0|$ on the reference interval $[0,1]$, where $x_0$ is the location of the singularity and is given at construction time, and $f(x)$ is a smooth, non-singular function.

    Singular quadrature formulas are rather expensive; nevertheless, Telles' quadrature formulas are much easier to compute than other singular integration techniques such as Lachat-Watson.

    We have implemented the case $dim = 1$. For the case $dim > 1$, the quadrature formula is computed as a tensor product of one-dimensional Telles' quadrature formulas, taking into account the different components of the singularity.

    The weights and points of the Gauss-Legendre formula have been tabulated up to order 12.
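    For instance, a 1d Telles rule placed at the singularity can integrate a weakly singular integrand accurately. A small sketch, assuming the QTelles(n, singularity) constructor described above; the order and the integrand $\log|x-x_0|$ are chosen only for illustration:

    #include <deal.II/base/point.h>
    #include <deal.II/base/quadrature_lib.h>

    #include <cmath>
    #include <iostream>

    using namespace dealii;

    int main()
    {
      // 1d Telles rule with the singularity placed at x0 = 0.5.
      const Point<1>   x0(0.5);
      const QTelles<1> rule(10, x0);

      // Integrate the weakly singular function log|x - x0| over [0,1].
      double integral = 0;
      for (unsigned int q = 0; q < rule.size(); ++q)
        integral += std::log(std::abs(rule.point(q)[0] - x0[0])) * rule.weight(q);

      std::cout << integral << std::endl; // exact value: -1 - log(2)
    }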

    /usr/share/doc/packages/dealii/doxygen/deal.II/classQTrianglePolar.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQTrianglePolar.html 2024-11-15 06:44:21.395599818 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQTrianglePolar.html 2024-11-15 06:44:21.395599818 +0000 @@ -224,8 +224,8 @@

    \[
    x = v_0 + B \hat x
    \]

    where the matrix $B$ is given by $B_{ij} = v[j][i]-v[0][i]$.

    The weights are scaled with the absolute value of the determinant of $B$, that is $J \dealcoloneq |\text{det}(B)|$. If $J$ is zero, an empty quadrature is returned. This may happen, in two dimensions, if the three vertices are aligned, or in three dimensions if the four vertices are on the same plane. The present function also works in the codimension one and codimension two cases. For instance, when dim=2 and spacedim=3, we can map the quadrature points so that they live on the physical triangle embedded in three-dimensional space. In such a case, the matrix $B$ is no longer square.

    Parameters
    [in] vertices : The vertices of the simplex you wish to integrate on
    /usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html 2024-11-15 06:44:21.411599960 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQWitherdenVincentSimplex.html 2024-11-15 06:44:21.411599960 +0000 @@ -125,7 +125,7 @@

    Detailed Description

    template<int dim>
    class QWitherdenVincentSimplex< dim >

    Witherden-Vincent rules for simplex entities.

    Like QGauss, users should specify a number n_points_1d as an indication of what polynomial degree is to be integrated exactly (e.g., for $n$ points, the rule can integrate polynomials of degree $2 n - 1$ exactly). Additionally, since these rules were derived for simplices, there are also even-ordered rules (i.e., rules that integrate polynomials of degree $2 n$) available which do not have analogous 1d rules.
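    A short sketch of constructing both variants; the use_odd_order flag as second constructor argument is an assumption about the interface, and the resulting point counts are listed below:

    #include <deal.II/base/quadrature_lib.h>

    using namespace dealii;

    int main()
    {
      // Odd-order (default) rule: n_points_1d = 2 integrates polynomials
      // of degree 2*2 - 1 = 3 exactly on the reference triangle.
      const QWitherdenVincentSimplex<2> odd_rule(2);

      // Even-order rule: integrates polynomials of degree 2*2 = 4 exactly.
      const QWitherdenVincentSimplex<2> even_rule(2, /*use_odd_order=*/false);
    }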

    The given value for n_points_1d = 1, 2, 3, 4, 5, 6, 7 (where the last two are only implemented in 2d) results in the following number of quadrature points in 2d and 3d:

    • 2d: odd (default): 1, 6, 7, 15, 19, 28, 37
    • 2d: even: 3, 6, 12, 16, 25, 33, 42
    @@ -203,8 +203,8 @@

      \[
      x = v_0 + B \hat x
      \]

      where the matrix $B$ is given by $B_{ij} = v[j][i]-v[0][i]$.

      The weights are scaled with the absolute value of the determinant of $B$, that is $J \dealcoloneq |\text{det}(B)|$. If $J$ is zero, an empty quadrature is returned. This may happen, in two dimensions, if the three vertices are aligned, or in three dimensions if the four vertices are on the same plane. The present function also works in the codimension one and codimension two cases. For instance, when dim=2 and spacedim=3, we can map the quadrature points so that they live on the physical triangle embedded in three-dimensional space. In such a case, the matrix $B$ is no longer square.

      Parameters
      /usr/share/doc/packages/dealii/doxygen/deal.II/classQuadrature.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classQuadrature.html 2024-11-15 06:44:21.447600282 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classQuadrature.html 2024-11-15 06:44:21.447600282 +0000 @@ -242,9 +242,9 @@

      At least for quadrilaterals and hexahedra (or, more precisely, since we work on reference cells: for the unit square and the unit cube), quadrature formulas are typically tensor products of one-dimensional formulas (see also the section on implementation detail below).

      In order to allow for dimension independent programming, a quadrature formula of dimension zero exists. Since an integral over zero dimensions is the evaluation at a single point, any constructor of such a formula initializes to a single quadrature point with weight one. Access to the weight is possible, while access to the quadrature point is not permitted, since a Point of dimension zero contains no information. The main purpose of these formulae is their use in QProjector, which will create a useful formula of dimension one out of them.

      Mathematical background

      For each quadrature formula we denote by m the maximal degree of polynomials integrated exactly on the reference cell the quadrature formula corresponds to; this number is given in the documentation of each concrete formula. The order of the integration error is m+1; that is, the error scales as the cell size to the power m+1 by the Bramble-Hilbert lemma. For the optimal formulae QGauss we have $m = 2N-1$, where $N$ is the constructor parameter to QGauss. The tensor product formulae are exact on tensor product polynomials of degree m in each space direction, but they are still only of (m+1)st order.

      Tensor product quadrature

      At least for hypercube reference cells (i.e., squares and cubes), most integration formulae in more than one space dimension are tensor products of quadrature formulae in one space dimension, or more generally the tensor product of a formula in (dim-1) dimensions and one in one dimension. There is a special constructor to generate a quadrature formula from two others. For example, the QGauss<dim> formulae include $N^{dim}$ quadrature points in dim dimensions, where $N$ is the constructor parameter of QGauss.
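      A minimal sketch of both ways of obtaining the same tensor product rule, assuming the Quadrature(sub_quadrature, quadrature_1d) constructor mentioned above:

      #include <deal.II/base/quadrature_lib.h>

      #include <cassert>

      using namespace dealii;

      int main()
      {
        // QGauss<2>(3) is the tensor product of two 1d, 3-point Gauss rules:
        const QGauss<2> gauss_2d(3);
        assert(gauss_2d.size() == 9); // N^dim = 3^2 quadrature points

        // The same rule, built explicitly from a (dim-1)-dimensional rule
        // and a 1d rule via the tensor-product constructor:
        const QGauss<1>     gauss_1d(3);
        const Quadrature<2> product(QGauss<1>(3), gauss_1d);
        assert(product.size() == 9);
      }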

      Other uses of this class

      Quadrature objects are used in a number of places within deal.II where integration is performed, most notably via the FEValues and related classes. Some of these classes are also used in contexts where no integrals are involved, but where functions need to be evaluated at specific points, for example to evaluate the solution at individual points or to create graphical output. Examples are the implementation of VectorTools::point_value() and the DataOut and related classes (in particular in connection with the DataPostprocessor class). In such contexts, one often creates specific "Quadrature" objects in which the "quadrature points" are simply the points (in the coordinate system of the reference cell) at which one wants to evaluate the solution. In these kinds of cases, the weights stored by the current class are not used and the name "quadrature object" is interpreted as "list of evaluation points".

      /usr/share/doc/packages/dealii/doxygen/deal.II/classReferenceCell.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classReferenceCell.html 2024-11-15 06:44:21.503600782 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classReferenceCell.html 2024-11-15 06:44:21.503600782 +0000 @@ -500,7 +500,7 @@
    Compute the value of the $i$-th linear shape function at location $\xi$ for the current reference-cell type.

    Definition at line 2620 of file reference_cell.h.

    @@ -530,7 +530,7 @@
    Compute the gradient of the $i$-th linear shape function at location $\xi$ for the current reference-cell type.

    Definition at line 2709 of file reference_cell.h.

    @@ -1001,7 +1001,7 @@
    Return the reference-cell type of face face_no of the current object. For example, if the current object is ReferenceCells::Tetrahedron, then face_no must be in the interval $[0,4)$ and the function will always return ReferenceCells::Triangle. If the current object is ReferenceCells::Hexahedron, then face_no must be in the interval $[0,6)$ and the function will always return ReferenceCells::Quadrilateral. For wedges and pyramids, the returned object may be either ReferenceCells::Triangle or ReferenceCells::Quadrilateral, depending on the given index.

    Definition at line 1878 of file reference_cell.h.

    @@ -1429,7 +1429,7 @@
    Return the $d$-dimensional volume of the reference cell that corresponds to the current object, where $d$ is the dimension of the space it lives in. For example, since the quadrilateral reference cell is $[0,1]^2$, its volume is one, whereas the volume of the reference triangle is 0.5 because it occupies the area $\{0 \le x,y \le 1, x+y\le 1\}$.

    For ReferenceCells::Vertex, the reference cell is a zero-dimensional point in a zero-dimensional space. As a consequence, one cannot meaningfully define a volume for it. The function returns one for this case, because this makes it possible to define useful quadrature rules based on the center of a reference cell and its volume.

    Definition at line 2743 of file reference_cell.h.

    @@ -1461,9 +1461,9 @@

    Return the barycenter (i.e., the center of mass) of the reference cell that corresponds to the current object. The function is not called center() because one can define the center of an object in a number of different ways whereas the barycenter of a reference cell $K$ is unambiguously defined as

\[
   \mathbf x_K = \frac{1}{V} \int_K \mathbf x \; dx
\]

    where $V$ is the volume of the reference cell (see also the volume() function).

    @@ -1495,7 +1495,7 @@
    Return true if the given point is inside the reference cell of the present space dimension up to some tolerance. This function accepts an additional parameter (which defaults to zero) which specifies by how much the point position may actually be outside the true reference cell. This is useful because in practice we may often not be able to compute the coordinates of a point in reference coordinates exactly, but only up to numerical roundoff. For example, strictly speaking one would expect that for points on the boundary of the reference cell, the function would return true if the tolerance was zero. But in practice, this may or may not actually be true; for example, the point $(1/3, 2/3)$ is on the boundary of the reference triangle because $1/3+2/3 \le 1$, but since neither of its coordinates are exactly representable in floating point arithmetic, the floating point representations of $1/3$ and $2/3$ may or may not add up to anything that is less than or equal to one.

    The tolerance parameter may be less than zero, indicating that the point should be safely inside the cell.

    Definition at line 2807 of file reference_cell.h.
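    A short sketch tying these queries together for the triangle reference cell; the member names volume(), barycenter(), and contains_point() follow the descriptions above, while the exact template arguments are assumptions:

    #include <deal.II/base/point.h>
    #include <deal.II/grid/reference_cell.h>

    #include <iostream>

    using namespace dealii;

    int main()
    {
      const ReferenceCell tri = ReferenceCells::Triangle;

      std::cout << tri.volume() << std::endl;        // 0.5
      std::cout << tri.barycenter<2>() << std::endl; // (1/3, 1/3)

      // A boundary point: whether it counts as "inside" depends on
      // roundoff, so allow a small positive tolerance.
      const Point<2> p(1.0 / 3.0, 2.0 / 3.0);
      std::cout << tri.contains_point(p, 1e-12) << std::endl;
    }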

    @@ -1548,8 +1548,8 @@
    Return the $i$-th unit tangential vector of a face of the reference cell. The vectors are arranged such that the cross product between the two vectors returns the unit normal vector.

    Precondition
    $i$ must be between zero and dim-1.

    Definition at line 2916 of file reference_cell.h.

    @@ -2009,7 +2009,7 @@ const bool legacy_format

    Given a set of node indices of the form $(i)$ or $(i,j)$ or $(i,j,k)$ (depending on whether the reference cell is in 1d, 2d, or 3d), return the index the VTK format uses for this node for cells that are subdivided as many times in each of the coordinate directions as described by the second argument. For a uniformly subdivided cell, the second argument is an array whose elements will all be equal.

    The last argument, legacy_format, indicates whether to use the old, VTK legacy format (when true) or the new, VTU format (when false).

    /usr/share/doc/packages/dealii/doxygen/deal.II/classRol_1_1VectorAdaptor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classRol_1_1VectorAdaptor.html 2024-11-15 06:44:21.527600996 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classRol_1_1VectorAdaptor.html 2024-11-15 06:44:21.527600996 +0000 @@ -503,7 +503,7 @@
    Return the $L^{2}$ norm of the wrapped vector.

    The returned type is VectorAdaptor::value_type so as to maintain consistency with ROL::Vector<VectorAdaptor::value_type> and, more importantly, to avoid creating an overloaded version (namely, VectorAdaptor::real_type norm() const) if real_type and value_type are not the same type.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html 2024-11-15 06:44:21.563601318 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1ARKode.html 2024-11-15 06:44:21.563601318 +0000 @@ -211,85 +211,85 @@

    The class ARKode is a wrapper around SUNDIALS' variable-step, embedded, additive Runge-Kutta solver, a general-purpose solver for systems of ordinary differential equations characterized by the presence of both fast and slow dynamics.

    Fast dynamics are treated implicitly, and slow dynamics are treated explicitly, using nested families of implicit and explicit Runge-Kutta solvers.

    Citing directly from ARKode documentation:

    ARKode solves ODE initial value problems (IVPs) in $R^N$. These problems should be posed in explicit form as

\[
   M\dot y = f_E(t, y) + f_I (t, y), \qquad y(t_0) = y_0.
\]

    Here, $t$ is the independent variable (e.g. time), and the dependent variables are given by $y \in R^N$, and we use notation $\dot y$ to denote $dy/dt$. $M$ is a user-supplied nonsingular operator from $R^N \to R^N$. This operator may depend on $t$ but not on $y$.

    For standard systems of ordinary differential equations and for problems arising from the spatial semi-discretization of partial differential equations using finite difference or finite volume methods, $M$ is typically the identity matrix, $I$. For PDEs using a finite-element spatial semi-discretization $M$ is typically a well-conditioned mass matrix.

    The two right-hand side functions may be described as:

    ARKode may be used to solve stiff, nonstiff and multi-rate problems. Roughly speaking, stiffness is characterized by the presence of at least one rapidly damped mode, whose time constant is small compared to the time scale of the solution itself. In the implicit/explicit (ImEx) splitting above, these stiff components should be included in the right-hand side function $f_I (t, y)$.

    For multi-rate problems, a user should provide both of the functions $f_E$ and $f_I$ that define the IVP system.

    For nonstiff problems, only $f_E$ should be provided, and $f_I$ is assumed to be zero, i.e. the system reduces to the non-split IVP:

\[
   M\dot y = f_E(t, y), \qquad y(t_0) = y_0.
\]

    In this scenario, the ARK methods reduce to classical explicit Runge-Kutta methods (ERK). For these classes of methods, ARKode allows orders of accuracy $q = \{2, 3, 4, 5, 6, 8\}$, with embeddings of orders $p = \{1, 2, 3, 4, 5, 7\}$. These default to the Heun-Euler-2-1-2, Bogacki-Shampine-4-2-3, Zonneveld-5-3-4, Cash-Karp-6-4-5, Verner-8-5-6 and Fehlberg-13-7-8 methods, respectively.

    Finally, for stiff (linear or nonlinear) problems the user may provide only $f_I$, implying that $f_E = 0$, so that the system reduces to the non-split IVP

\[
   M\dot y = f_I(t, y), \qquad y(t_0) = y_0.
\]

    Similarly to ERK methods, in this scenario the ARK methods reduce to classical diagonally-implicit Runge-Kutta methods (DIRK). For these classes of methods, ARKode allows orders of accuracy $q = \{2, 3, 4, 5\}$, with embeddings of orders $p = \{1, 2, 3, 4\}$. These default to the SDIRK-2-1-2, ARK-4-2-3 (implicit), SDIRK-5-3-4 and ARK-8-4-5 (implicit) methods, respectively.

    For both DIRK and ARK methods, an implicit system of the form

\[
   G(z_i) \dealcoloneq M z_i - h_n A^I_{i,i} f_I (t^I_{n,i}, z_i) - a_i = 0
\]

    must be solved for each stage $z_i, i = 1, \ldots, s$, where we have the data

\[
   a_i \dealcoloneq
   M y_{n-1} + h_n \sum_{j=1}^{i-1} [ A^E_{i,j} f_E(t^E_{n,j}, z_j)
   + A^I_{i,j} f_I (t^I_{n,j}, z_j)]
\]

    for the ARK methods, or

\[
   a_i \dealcoloneq
   M y_{n-1} + h_n \sum_{j=1}^{i-1} A^I_{i,j} f_I (t^I_{n,j}, z_j)
\]

    for the DIRK methods. Here $A^I_{i,j}$ and $A^E_{i,j}$ are the Butcher's tables for the chosen solver.

    If $f_I(t,y)$ depends nonlinearly on $y$ then the systems above correspond to a nonlinear system of equations; if $f_I (t, y)$ depends linearly on $y$ then this is a linear system of equations. By specifying the flag implicit_function_is_linear, ARKode takes some shortcuts that allow a faster solution process.

    For systems of either type, ARKode allows a choice of solution strategy. The default solver choice is a variant of Newton's method,

\[
   z_i^{m+1} = z_i^m + \delta^{m+1},
\]

    where $m$ is the Newton step index, and the Newton update $\delta^{m+1}$ requires the solution of the linear Newton system

\[
   N(z_i^m) \delta^{m+1} = -G(z_i^m),
\]

    where

\[
   N \dealcoloneq M - \gamma J, \quad J
   \dealcoloneq \frac{\partial f_I}{\partial y},
   \qquad \gamma \dealcoloneq h_n A^I_{i,i}.
\]

    As an alternative to Newton's method, ARKode may solve for each stage $z_i, i = 1, \ldots, s$ using an Anderson-accelerated fixed point iteration

\[
   z_i^{m+1} = g(z_i^{m}), \quad m = 0, 1, \ldots.
\]

    Unlike Newton's method, this option does not require the solution of a linear system at each iteration; instead, it solves a low-dimensional least-squares problem to construct the nonlinear update.

    Finally, if the user specifies implicit_function_is_linear, i.e., $f_I(t, y)$ depends linearly on $y$, and if the Newton-based nonlinear solver is chosen, then the system will be solved using only a single Newton iteration. Notice that in order for the Newton solver to be used, jacobian_times_vector() should be supplied. If it is not supplied then only the fixed-point iteration will be supported, and the implicit_function_is_linear setting is ignored.

    The optimal solver (Newton vs fixed-point) is highly problem-dependent. Since fixed-point solvers do not require the solution of any linear systems, each iteration may be significantly less costly than their Newton counterparts. However, this can come at the cost of slower convergence (or even divergence) in comparison with Newton-like methods. These fixed-point solvers do allow for user specification of the Anderson-accelerated subspace size, $m_k$. While the required amount of solver memory grows proportionately to $m_k N$, larger values of $m_k$ may result in faster convergence.

    This improvement may be significant even for "small" values, e.g. $1 \leq m_k \leq 5$, and convergence may not improve (or even deteriorate) for larger values of $m_k$. While ARKode uses a Newton-based iteration as its default solver due to its increased robustness on very stiff problems, it is highly recommended that users also consider the fixed-point solver when attempting a new problem.

    For either the Newton or fixed-point solvers, it is well known that both the efficiency and robustness of the algorithm intimately depend on the choice of a good initial guess. In ARKode, the initial guess for either nonlinear solution method is a predicted value $z_i(0)$ that is computed explicitly from the previously-computed data (e.g. $y_{n-2}, y_{n-1}$, and $z_j$ where $j < i$). Additional information on the specific predictor algorithms implemented in ARKode is provided in the ARKode documentation.

    The user has to provide the implementation of at least one (or both) of the following std::functions:

    To provide a simple example, consider the harmonic oscillator problem:

\[
   \begin{split}
     u'' & = -k^2 u \\
     u (0) & = 0 \\
     u'(0) & = k
   \end{split}
\]

    We write it in terms of a first order ode:

\[
   \begin{matrix}
     y_0' & = y_1 \\
     y_1' & = -k^2 y_0
   \end{matrix}
\]

    That is, $y' = A y$ where

\[
   A \dealcoloneq
   \begin{pmatrix}
   0 & 1 \\
   -k^2 & 0
   \end{pmatrix}
\]

    and $y(0)=(0, k)^T$.

    The exact solution is $y_0(t) = \sin(k t)$, $y_1(t) = y_0'(t) = k \cos(k t)$, $y_1'(t) = -k^2 \sin(k t)$.

    A minimal implementation, using only explicit RK methods, is given by the following code snippet:

    using VectorType = Vector<double>;
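    // The rest of the snippet is cut off by the diff above; a hedged
    // sketch of how it plausibly continues, using only the members
    // documented in this section (kappa is a made-up constant):
    const double kappa = 1.0;

    SUNDIALS::ARKode<VectorType>::AdditionalData data;
    SUNDIALS::ARKode<VectorType> ode(data);

    // Supplying only f_E selects a purely explicit RK method.
    ode.explicit_function =
      [&](const double /*t*/, const VectorType &y, VectorType &ydot) {
        ydot[0] = y[1];
        ydot[1] = -kappa * kappa * y[0];
      };

    VectorType y(2);
    y[1] = kappa; // y(0) = (0, k)^T
    ode.solve_ode(y);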
    @@ -733,8 +733,8 @@
    A function object that users may supply and that is intended to compute the explicit part of the IVP right hand side. Sets $explicit_f = f_E(t, y)$.

    At least one of explicit_function() or implicit_function() must be provided. According to which one is provided, explicit, implicit, or mixed RK methods are used.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, ARKode can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.
    @@ -755,8 +755,8 @@
    A function object that users may supply and that is intended to compute the implicit part of the IVP right hand side. Sets $implicit_f = f_I(t, y)$.

    At least one of explicit_function() or implicit_function() must be provided. According to which one is provided, explicit, implicit, or mixed RK methods are used.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, ARKode can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.
    @@ -778,7 +778,7 @@ /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html 2024-11-15 06:44:21.599601640 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA.html 2024-11-15 06:44:21.599601640 +0000 @@ -203,72 +203,72 @@

    Citing from the SUNDIALS documentation:

    Consider a system of Differential-Algebraic Equations written in the general form

\[
    \begin{cases}
        F(t,y,\dot y) = 0\, , \\
        y(t_0) = y_0\, , \\
        \dot y (t_0) = \dot y_0\, .
    \end{cases}
\]

    where $y,\dot y$ are vectors in $\mathbb{R}^n$, $t$ is often the time (but can also be a parametric quantity), and $F:\mathbb{R}\times\mathbb{R}^n\times \mathbb{R}^n\rightarrow\mathbb{R}^n$. Such a problem is solved using Newton iteration augmented with a line search global strategy. The integration method used in IDA is the variable-order, variable-coefficient BDF (Backward Differentiation Formula) in fixed-leading-coefficient form. The method order ranges from 1 to 5, with the BDF of order $q$ given by the multistep formula

\[
    \sum_{i=0}^q \alpha_{n,i}\,y_{n-i}=h_n\,\dot y_n\, ,
    \label{eq:bdf}
\]

    where $y_n$ and $\dot y_n$ are the computed approximations of $y(t_n)$ and $\dot y(t_n)$, respectively, and the step size is $h_n=t_n-t_{n-1}$. The coefficients $\alpha_{n,i}$ are uniquely determined by the order $q$ and the history of the step sizes. The application of the BDF method to the DAE system results in a nonlinear algebraic system to be solved at each time step:

\[
    G(y_n)\equiv F\left(t_n,y_n,\dfrac{1}{h_n}\sum_{i=0}^q
   \alpha_{n,i}\,y_{n-i}\right)=0\, .
\]

    The Newton method leads to a linear system of the form

\[
    J[y_{n(m+1)}-y_{n(m)}]=-G(y_{n(m)})\, ,
\]

    where $y_{n(m)}$ is the $m$-th approximation to $y_n$, and $J$ is the approximation of the system Jacobian

\[
    J=\dfrac{\partial G}{\partial y}
    = \dfrac{\partial F}{\partial y} +
      \alpha \dfrac{\partial F}{\partial \dot y}\, ,
\]

    and $\alpha = \alpha_{n,0}/h_n$. It is worth mentioning that the scalar $\alpha$ changes whenever the step size or method order changes.

    A simple example: an ordinary differential equation

    To provide a simple example, consider the following harmonic oscillator problem:

\[
   \begin{split}
    u'' & = -k^2 u \\
    u (0) & = 0 \\
    u'(0) & = k
  \end{split}
\]

    We write it in terms of a first order ode:

\[
  \begin{matrix}
    y_0' & -y_1      & = 0 \\
    y_1' & + k^2 y_0 & = 0
  \end{matrix}
\]

    That is, $F(y', y, t) = y' + A y = 0$ where

\[
  A =
  \begin{pmatrix}
  0 & -1 \\
  k^2 & 0
  \end{pmatrix}
\]

    and $y(0)=(0, k)$, $y'(0) = (k, 0)$.

    The exact solution is $y_0(t) = \sin(k t)$, $y_1(t) = y_0'(t) = k \cos(k t)$, $y_1'(t) = -k^2 \sin(k t)$.

    The Jacobian to assemble is the following: $J = \alpha I + A$.

    This is achieved by the following snippet of code:

    using VectorType = Vector<double>;
    VectorType y(2);
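    // The diff truncates the snippet here; a hedged sketch of the elided
    // part, following the callback signatures documented in this section
    // (k is a made-up constant, and a complete program would also set
    // setup_jacobian() and solve_with_jacobian()):
    const double k = 1.0;
    VectorType y_dot(2);
    y[1] = k;     // y(0)  = (0, k)
    y_dot[0] = k; // y'(0) = (k, 0)

    SUNDIALS::IDA<VectorType> time_stepper;
    time_stepper.reinit_vector = [](VectorType &v) { v.reinit(2); };

    // Residual F(t, y, y') = y' + A y:
    time_stepper.residual = [&](const double /*t*/,
                                const VectorType &y,
                                const VectorType &y_dot,
                                VectorType &residual) {
      residual[0] = y_dot[0] - y[1];
      residual[1] = y_dot[1] + k * k * y[0];
    };

    time_stepper.solve_dae(y, y_dot);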
    @@ -330,68 +330,68 @@

    A differential algebraic equation (DAE) example

    A more interesting example is a situation where the form $F(y', y, t) = 0$ provides something genuinely more flexible than a typical ordinary differential equation. Specifically, consider the equation

\begin{align*}
    u'(t) &= av(t),
    \\
    0 &= v(t) - u(t).
\end{align*}

    One can combine the two variables into $y(t) = [u(t), v(t)]^T$. Here, one of the two variables does not have a time derivative. In applications, this is often the case when one variable evolves in time (here, $u(t)$) on its own time scale, and the other one finds its value as a function of the former on a much faster time scale. In the current context, we could of course easily eliminate $v(t)$ using the second equation, and would then just be left with the equation

\[
    u'(t) = au(t)
\]

    which has solution $u(t) = u(0)e^{at}$. But this is, in general, not easily possible if the two variables are related by differential operators. In fact, this happens quite frequently in applications. Take, for example, the time-dependent Stokes equations:

\begin{align*}
    \frac{\partial \mathbf u(\mathbf x,t)}{\partial t}
    - \nu \Delta \mathbf u(\mathbf x,t) + \nabla p(\mathbf x,t)
    &= \mathbf f(\mathbf x,t),
    \\
    \nabla \cdot \mathbf u(\mathbf x,t) &= 0.
\end{align*}

    Here, the fluid velocity $\mathbf u(\mathbf x,t)$ evolves over time, and the pressure is always in equilibrium with the flow because the Stokes equations are derived under the assumption that the speed of sound (at which pressure perturbations propagate) is much larger than the fluid velocity. As a consequence, there is no time derivative on the pressure available in the equation, but unlike the simple model problem above, the pressure can not easily be eliminated from the system. Similar situations happen in step-21, step-31, step-32, step-43, and others, where a subset of variables is always in instantaneous equilibrium with another set of variables that evolves on a slower time scale.

    Another case where we could eliminate a variable but do not want to is where that additional variable is introduced in the first place to work around some other problem. As an example, consider the time dependent version of the biharmonic problem we consider in step-47 (as well as some later ones). The equations we would then be interested in would read

\begin{align*}
    \frac{\partial u(\mathbf x,t)}{\partial t} + \Delta^2 u(\mathbf x,t) &=
    f(\mathbf x,t).
\end{align*}

    As discussed in step-47, the difficulty is the presence of the fourth derivatives. One way in which one can address this is by introducing an auxiliary variable $v=\Delta u$ which would render the problem into the following one that only ever has second derivatives which we know how to deal with:

\begin{align*}
    \frac{\partial u(\mathbf x,t)}{\partial t} + \Delta v(\mathbf x,t) &=
    f(\mathbf x,t),
    \\
    v(\mathbf x,t)-\Delta u(\mathbf x,t) &= 0.
\end{align*}

    Here, the introduction of the additional variable was voluntary, and could be undone, but we don't want that of course. Rather, we end up with a differential-algebraic equation because the equations do not have a time derivative for $v$.

    Rather than show how to solve the trivial (linear) case above, let us instead consider the situation where we introduce another variable $v$ that is related to $u$ by the nonlinear relationship $v=u^p$, $p\ge 1$:

\begin{align*}
    u'(t) &= a v(t)^{1/p},
    \\
    0 &= v(t) - u(t)^p.
\end{align*}

    We will impose initial conditions as

\begin{align*}
    u(0) &= 1 \\
    v(0) &= 1.
\end{align*}

    The problem continues to have the solution $u(t)=e^{at}$ with the auxiliary variable satisfying $v(t)=[e^{at}]^p$. One would implement all of this using the following little program where you have to recall that

\[
    F = \begin{pmatrix}u' -a v^{1/p} \\ -u^p + v \end{pmatrix}
\]

    and that the Jacobian we need to provide is

\[
    J(\alpha)
    = \dfrac{\partial F}{\partial y} +
      \alpha \dfrac{\partial F}{\partial \dot y}
    = \begin{pmatrix} \alpha & -av^{1/p-1}/p \\ -pu^{p-1} & 1 \end{pmatrix}
\]

    All of this can be implemented using the following code:

    const double a = 1.0;
    const double p = 1.5;
    @@ -447,30 +447,30 @@
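    // The diff elides the middle of the program; a hedged sketch of the
    // missing part, reusing the callbacks documented above (initial
    // conditions follow u(t)=e^{at}, v(t)=e^{apt}; a complete program
    // would also set setup_jacobian() and solve_with_jacobian()):
    using VectorType = Vector<double>;
    VectorType y(2), y_dot(2);
    y[0] = y[1] = 1.0; // u(0) = v(0) = 1
    y_dot[0] = a;      // u'(0) = a
    y_dot[1] = a * p;  // v'(0) = a p

    SUNDIALS::IDA<VectorType> time_stepper;
    time_stepper.reinit_vector = [](VectorType &v) { v.reinit(2); };

    // Residual F = (u' - a v^{1/p}, -u^p + v):
    time_stepper.residual = [&](const double /*t*/,
                                const VectorType &y,
                                const VectorType &y_dot,
                                VectorType &residual) {
      residual[0] = y_dot[0] - a * std::pow(y[1], 1. / p);
      residual[1] = -std::pow(y[0], p) + y[1];
    };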
    time_stepper.solve_dae(y, y_dot);

    Note that in this code, we not only provide initial conditions for $u$ and $v$, but also for $u'$ and $v'$. We can do this here because we know what the exact solution is.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html 2024-11-15 06:44:21.619601818 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1IDA_1_1AdditionalData.html 2024-11-15 06:44:21.619601818 +0000 @@ -179,14 +179,14 @@
    IDA is a Differential Algebraic solver. As such, it requires initial conditions also for the first order derivatives. If you do not provide consistent initial conditions (i.e., conditions for which $F(\dot y(0), y(0), 0) = 0$), you can ask SUNDIALS to compute initial conditions for you by specifying InitialConditionCorrection for the initial conditions both at the initial_time (ic_type) and after a reset has occurred (reset_type).

    Enumerator:

    none : Do not try to make initial conditions consistent.

    use_y_diff : Compute the algebraic components of $y$ and differential components of $\dot y$, given the differential components of $y$. This option requires that the user specifies differential and algebraic components in the function IDA::differential_components().

    use_y_dot : Compute all components of $y$, given $\dot y$.

    @@ -565,8 +565,8 @@

    Type of correction for initial conditions.

    If you do not provide consistent initial conditions (i.e., conditions for which $F(\dot y(0), y(0), 0) = 0$), you can ask SUNDIALS to compute initial conditions for you by using the ic_type parameter at construction time.

    Notice that you could in principle use this capability to solve for steady state problems by setting y_dot to zero and asking to compute the $y(0)$ that satisfies $F(0, y(0), 0) = 0$; however, the nonlinear solver used inside IDA may not be robust enough for complex problems with several million unknowns.

    Definition at line 775 of file ida.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 2024-11-15 06:44:21.647602068 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL.html 2024-11-15 06:44:21.647602068 +0000 @@ -188,48 +188,48 @@

    Detailed Description

    template<typename VectorType = Vector<double>>
    class SUNDIALS::KINSOL< VectorType >

    Interface to SUNDIALS' nonlinear solver (KINSOL).

    KINSOL is a solver for nonlinear algebraic systems in residual form $F(u) = 0$ or fixed point form $G(u) = u$, where $u$ is a vector which we will assume to be in ${\mathbb R}^n$ or ${\mathbb C}^n$, but that may also have a block structure and may be distributed in parallel computations; the functions $F$ and $G$ satisfy $F,G:{\mathbb R}^N \to{\mathbb R}^N$ or $F,G:{\mathbb C}^N \to{\mathbb C}^N$. It includes a Newton-Krylov solver as well as Picard and fixed point solvers, both of which can be accelerated with Anderson acceleration. KINSOL is based on the previous Fortran package NKSOL of Brown and Saad. An example of using KINSOL can be found in the step-77 tutorial program.

    KINSOL's Newton solver employs the inexact Newton method. As this solver is intended mainly for large systems, the user is required to provide their own solver function.

    At the highest level, KINSOL implements the following iteration scheme:

    • Set $u_0$ = an initial guess
    • For $n = 0, 1, 2, \ldots$ until convergence do:
      • Solve $J(u_n)\delta_n = -F(u_n)$
      • Set $u_{n+1} = u_n + \lambda \delta_n, 0 < \lambda \leq 1$
      • Test for convergence
      Here, $u_n$ is the $n$-th iterate to $u$, and $J(u) = \nabla_u F(u)$ is the system Jacobian. At each stage in the iteration process, a scalar multiple of the step $\delta_n$ is added to $u_n$ to produce a new iterate, $u_{n+1}$. A test for convergence is made before the iteration continues.

      Unless specified otherwise by the user, KINSOL strives to update Jacobian information as infrequently as possible to balance the high costs of matrix operations against other costs. Specifically, these updates occur when:

      • the problem is initialized,
      • $\|\lambda \delta_{n-1} \|_{D_u,\infty} \geq 1.5$ (inexact Newton only, see below for a definition of $\| \cdot \|_{D_u,\infty}$),
      • a specified number of nonlinear iterations have passed since the last update,
      • the linear solver failed recoverably with outdated Jacobian information,
      • the global strategy failed with outdated Jacobian information, or
      • $\|\lambda \delta_{n} \|_{D_u,\infty} \leq$ tolerance with outdated Jacobian information.

      KINSOL allows changes to the above strategy through optional solver inputs. The user can disable the initial Jacobian information evaluation or change the default value of the number of nonlinear iterations after which a Jacobian information update is enforced.

To address the case of ill-conditioned nonlinear systems, KINSOL allows prescribing scaling factors both for the solution vector and for the residual vector. For scaling to be used, the user may supply the function get_solution_scaling(), which returns values $D_u$, the diagonal elements of a scaling matrix such that $D_u u_n$ has all components roughly the same magnitude when $u_n$ is close to a solution, and get_function_scaling(), which supplies values $D_F$, the diagonal elements of a scaling matrix such that $D_F F$ has all components roughly the same magnitude when $u_n$ is not too close to a solution.

      When scaling values are provided for the solution vector, these values are automatically incorporated into the calculation of the perturbations used for the default difference quotient approximations for Jacobian information if the user does not supply a Jacobian solver through the solve_with_jacobian() function.

Two methods of applying a computed step $\delta_n$ to the previously computed solution vector are implemented. The first and simplest is the standard Newton strategy, which applies the update with a constant $\lambda$ always set to 1. The other method is a global strategy, which attempts to use the direction implied by $\delta_n$ in the most efficient way for furthering convergence of the nonlinear problem. This technique is implemented in the second strategy, called Linesearch. This option employs both the $\alpha$ and $\beta$ conditions of the Goldstein-Armijo linesearch algorithm given in [DennisSchnabel96], where $\lambda$ is chosen to guarantee a sufficient decrease in $F$ relative to the step length as well as a minimum step length relative to the initial rate of decrease of $F$. One property of the algorithm is that the full Newton step tends to be taken close to the solution.

      The basic fixed-point iteration scheme implemented in KINSOL is given by:

  • Set $u_0 =$ an initial guess
  • For $n = 0, 1, 2, \dots$ until convergence do:
      • Set $u_{n+1} = G(u_n)$
      • Test for convergence

At each stage in the iteration process, function $G$ is applied to the current iterate to produce a new iterate, $u_{n+1}$. A test for convergence is made before the iteration continues.

For Picard iteration, as implemented in KINSOL, we consider a special form of the nonlinear function $F$, such that $F(u) = Lu - N(u)$, where $L$ is a constant nonsingular matrix and $N$ is (in general) nonlinear.

Then the fixed-point function $G$ is defined as $G(u) = u - L^{-1}F(u)$. Within each iteration, the Picard step is computed then added to $u_n$ to produce the new iterate. Next, the nonlinear residual function is evaluated at the new iterate, and convergence is checked. The Picard and fixed point methods can be significantly accelerated using Anderson's acceleration method.

        The user has to provide the implementation of the following std::functions:

        • reinit_vector; and only one of
        • residual; or
        • iteration_function;
Specifying residual() allows the user to use the Newton and Picard strategies (i.e., $F(u)=0$ will be solved), while specifying iteration_function() means that a fixed point iteration will be used (i.e., $G(u)=u$ will be solved). An error will be thrown if iteration_function() is set for the Picard or Newton strategies.

If the use of a Newton or Picard method is desired, then the user should also supply the following (a combined sketch of all of these callbacks follows the list)

        • solve_with_jacobian; and optionally
        • setup_jacobian;
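
As a rough illustration of how these callbacks fit together, consider the following minimal sketch, loosely modeled on the step-77 tutorial program. The helper routines compute_residual(), factorize_jacobian() and solve_linear_system() are hypothetical stand-ins for problem-specific code, and the exact callback signatures are assumptions to be checked against kinsol.h:

#include <deal.II/lac/vector.h>
#include <deal.II/sundials/kinsol.h>

using namespace dealii;

// Hypothetical user routines, standing in for problem-specific code:
void compute_residual(const Vector<double> &u, Vector<double> &F);
void factorize_jacobian(const Vector<double> &u);
void solve_linear_system(const Vector<double> &rhs, Vector<double> &dst, double tolerance);

void solve_nonlinear_problem(Vector<double> &solution, const unsigned int n_dofs)
{
  using VectorType = Vector<double>;
  SUNDIALS::KINSOL<VectorType>::AdditionalData data;
  data.strategy = SUNDIALS::KINSOL<VectorType>::AdditionalData::linesearch;

  SUNDIALS::KINSOL<VectorType> solver(data);

  // Tell KINSOL how to size the vectors handed to the callbacks:
  solver.reinit_vector = [&](VectorType &x) { x.reinit(n_dofs); };

  // Setting 'residual' selects the Newton/Picard family, i.e., F(u) = 0:
  solver.residual = [&](const VectorType &u, VectorType &F) {
    compute_residual(u, F);
  };

  // Called only when KINSOL considers the Jacobian information outdated:
  solver.setup_jacobian = [&](const VectorType &u, const VectorType &) {
    factorize_jacobian(u);
  };

  // Solve J * dst = rhs, but only up to the tolerance KINSOL asks for:
  solver.solve_with_jacobian =
    [&](const VectorType &rhs, VectorType &dst, const double tolerance) {
      solve_linear_system(rhs, dst, tolerance);
    };

  solver.solve(solution);
}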
@@ -430,7 +430,7 @@
A function object that users should supply and that is intended to compute the iteration function $G(u)$ for the fixed point iteration. This function is only used if the SolutionStrategy::fixed_point strategy is selected.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, KINSOL can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.

    Definition at line 512 of file kinsol.h.

    @@ -452,14 +452,14 @@

    A function object that users may supply and that is intended to prepare the linear solver for subsequent calls to solve_with_jacobian().

    The job of setup_jacobian() is to prepare the linear solver for subsequent calls to solve_with_jacobian(), in the solution of linear systems $Ax = b$. The exact nature of this system depends on the SolutionStrategy that has been selected.

In the cases strategy = SolutionStrategy::newton or SolutionStrategy::linesearch, $A$ is the Jacobian $J = \partial F/\partial u$. If strategy = SolutionStrategy::picard, $A$ is the approximate Jacobian matrix $L$. If strategy = SolutionStrategy::fixed_point, then linear systems do not arise, and this function is never called.

    The setup_jacobian() function may call a user-supplied function, or a function within the linear solver group, to compute Jacobian-related data that is required by the linear solver. It may also preprocess that data as needed for solve_with_jacobian(), which may involve calling a generic function (such as for LU factorization) or, more generally, build preconditioners from the assembled Jacobian. In any case, the data so generated may then be used whenever a linear system is solved.

    The point of this function is that setup_jacobian() function is not called at every Newton iteration, but only as frequently as the solver determines that it is appropriate to perform the setup task. In this way, Jacobian-related data generated by setup_jacobian() is expected to be used over a number of Newton iterations. KINSOL determines itself when it is beneficial to regenerate the Jacobian and associated information (such as preconditioners computed for the Jacobian), thereby saving the effort to regenerate the Jacobian matrix and a preconditioner for it whenever possible.

    Parameters
current_u  Current value of $u$
current_f  Current value of $F(u)$ or $G(u)$
    @@ -484,12 +484,12 @@

    A function object that users may supply and that is intended to solve a linear system with the Jacobian matrix. This function will be called by KINSOL (possibly several times) after setup_jacobian() has been called at least once. KINSOL tries to do its best to call setup_jacobian() the minimum number of times. If convergence can be achieved without updating the Jacobian, then KINSOL does not call setup_jacobian() again. If, on the contrary, internal KINSOL convergence tests fail, then KINSOL calls setup_jacobian() again with updated vectors and coefficients so that successive calls to solve_with_jacobian() lead to better convergence in the Newton process.

    If you do not specify a solve_with_jacobian function, then only a fixed point iteration strategy can be used. Notice that this may not converge, or may converge very slowly.

A call to this function should store in dst the result of $J^{-1}$ applied to rhs, i.e., $J \cdot dst = rhs$. It is the user's responsibility to set up proper solvers and preconditioners inside this function (or in the setup_jacobian callback above). The function attached to this callback is also provided with a tolerance to the linear solver, indicating that it is not necessary to solve the linear system with the Jacobian matrix exactly, but only to a tolerance that KINSOL will adapt over time.

    Arguments to the function are:

    Parameters
[in]  rhs  The system right hand side to solve for.
[out]  dst  The solution of $J^{-1} \cdot rhs$.
[in]  tolerance  The tolerance with which to solve the linear system of equations.
    @@ -514,7 +514,7 @@

    A function object that users may supply and that is intended to return a vector whose components are the weights used by KINSOL to compute the vector norm of the solution. The implementation of this function is optional, and it is used only if implemented.

The intent for this scaling factor is for problems in which the different components of a solution have vastly different numerical magnitudes – typically because they have different physical units and represent different things. For example, if one were to solve a nonlinear Stokes problem, the solution vector has components that correspond to velocities and other components that correspond to pressures. These have different physical units and, depending on which units one chooses, they may have roughly comparable numerical sizes or maybe they don't. To give just one example, in simulations of flow in the Earth's interior, one has velocities on the order of maybe ten centimeters per year, and pressures up to around 100 GPa. If one expresses this in SI units, this corresponds to velocities of around $0.000,000,003=3 \times 10^{-9}$ m/s, and pressures around $10^9 \text{kg}/\text{m}/\text{s}^2$, i.e., vastly different. In such cases, computing the $l_2$ norm of a solution-type vector (e.g., the difference between the previous and the current solution) makes no sense because the norm will either be dominated by the velocity components or the pressure components. The scaling vector this function returns is intended to provide each component of the solution with a scaling factor that is generally chosen as the inverse of a "typical velocity" or "typical pressure" so that upon multiplication of a vector component by the corresponding scaling vector component, one obtains a number that is of order of magnitude of one (i.e., a reasonably small multiple of one times the typical velocity/pressure).

    If no function is provided to a KINSOL object, then this is interpreted as implicitly saying that all of these scaling factors should be considered as one.
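
As a hedged fragment continuing the sketch above (it assumes the get_solution_scaling callback returns a reference to a vector that outlives the call to solve(); is_pressure_dof() and the two magnitudes are made-up placeholders), the Stokes-like case could look like:

// Hypothetical helper classifying degrees of freedom:
bool is_pressure_dof(const unsigned int i);

Vector<double> scaling_weights(n_dofs); // must outlive solver.solve()
for (unsigned int i = 0; i < scaling_weights.size(); ++i)
  scaling_weights[i] = is_pressure_dof(i) ?
                         1e-9 : // inverse of a typical pressure (~1e9 Pa)
                         3e8;   // inverse of a typical velocity (~3e-9 m/s)

solver.get_solution_scaling = [&scaling_weights]() -> Vector<double> & {
  return scaling_weights;
};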

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, KINSOL can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.
    @@ -536,7 +536,7 @@

    A function object that users may supply and that is intended to return a vector whose components are the weights used by KINSOL to compute the vector norm of the function evaluation away from the solution. The implementation of this function is optional, and it is used only if implemented.

The point of this function and the scaling vector it returns is similar to the one discussed above for get_solution_scaling, except that it is for a vector that scales the components of the function $F(U)$, rather than the components of $U$, when computing norms. As above, if no function is provided, then this is equivalent to using a scaling vector whose components are all equal to one.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. In particular, KINSOL can deal with "recoverable" errors in some circumstances, so callbacks can throw exceptions of type RecoverableUserCallbackError.

    Definition at line 674 of file kinsol.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL_1_1AdditionalData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL_1_1AdditionalData.html 2024-11-15 06:44:21.671602283 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSUNDIALS_1_1KINSOL_1_1AdditionalData.html 2024-11-15 06:44:21.671602283 +0000 @@ -433,7 +433,7 @@
A scalar used as a stopping tolerance on the scaled maximum norm of the system function $F(u)$ or $G(u)$.

    If set to zero, default values provided by KINSOL will be used.

    Definition at line 366 of file kinsol.h.

    @@ -533,7 +533,7 @@
The relative error in computing $F(u)$, which is used in the difference quotient approximation to the Jacobian matrix when the user does not supply a solve_with_jacobian() function.

    If set to zero, default values provided by KINSOL will be used.

    Definition at line 410 of file kinsol.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html 2024-11-15 06:44:21.743602926 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classScaLAPACKMatrix.html 2024-11-15 06:44:21.743602926 +0000 @@ -373,15 +373,15 @@

    Detailed Description

    template<typename NumberType>
    class ScaLAPACKMatrix< NumberType >

    A wrapper class around ScaLAPACK parallel dense linear algebra.

ScaLAPACK assumes that matrices are distributed according to the block-cyclic decomposition scheme. An $M$ by $N$ matrix is first decomposed into $\lceil M / MB \rceil$ by $\lceil N / NB \rceil$ blocks which are then uniformly distributed across the 2d process grid with $p q \le Np$ processes, where $p,q$ are grid dimensions and $Np$ is the total number of processes. The parameters MB and NB are referred to as row and column block size and determine the granularity of the block-cyclic distribution.

In the following the block-cyclic distribution of a $10 \times 9$ matrix onto a $3\times 3$ Cartesian process grid with block sizes $\text{MB}=\text{NB}=2$ is displayed.

    Block-Cyclic Distribution
Note that the odd number of columns of the local matrices owned by the processes P2, P5 and P8 accounts for $N=9$ not being an integral multiple of $\text{NB}=2$.

    The choice of the block sizes is a compromise between a sufficiently large size for efficient local/serial BLAS, but one that is also small enough to achieve good parallel load balance.

    Below we show a strong scaling example of ScaLAPACKMatrix::invert() on up to 5 nodes each composed of two Intel Xeon 2660v2 IvyBridge sockets 2.20GHz, 10 cores/socket. Calculations are performed on square processor grids 1x1, 2x2, 3x3, 4x4, 5x5, 6x6, 7x7, 8x8, 9x9, 10x10.

    @@ -626,7 +626,7 @@

    Constructor for a rectangular matrix with n_rows and n_cols and distributed using the grid process_grid.

The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 80 of file scalapack.cc.
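
A minimal construction sketch under these recommendations; the sizes, block size and communicator are placeholders, not values taken from this documentation:

#include <deal.II/base/process_grid.h>
#include <deal.II/lac/scalapack.h>

#include <memory>

using namespace dealii;

void make_matrix(const MPI_Comm mpi_communicator)
{
  const unsigned int n_rows = 1000, n_cols = 900;
  const unsigned int block_size = 32; // a power of 2, as recommended above

  // Choose a process grid suitable for a matrix of this size and blocking:
  const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
    mpi_communicator, n_rows, n_cols, block_size, block_size);

  // Block-cyclically distributed n_rows x n_cols matrix:
  ScaLAPACKMatrix<double> A(n_rows, n_cols, grid, block_size, block_size);
}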

    @@ -663,7 +663,7 @@

    Constructor for a square matrix of size size, and distributed using the process grid in process_grid.

The parameter block_size is used for the block-cyclic distribution of the matrix. An identical block size is used for the rows and columns of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 105 of file scalapack.cc.

    @@ -701,7 +701,7 @@

    Constructor for a general rectangular matrix that is read from the file filename and distributed using the grid process_grid.

    Loads the matrix from file filename using HDF5. In case that deal.II was built without HDF5 a call to this function will cause an exception to be thrown.

The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 121 of file scalapack.cc.

    @@ -777,7 +777,7 @@

    Initialize the rectangular matrix with n_rows and n_cols and distributed using the grid process_grid.

The parameters row_block_size and column_block_size are the block sizes used for the block-cyclic distribution of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 216 of file scalapack.cc.

    @@ -814,7 +814,7 @@

    Initialize the square matrix of size size and distributed using the grid process_grid.

The parameter block_size is used for the block-cyclic distribution of the matrix. An identical block size is used for the rows and columns of the matrix. In general, it is recommended to use powers of $2$, e.g. $16,32,64, \dots$.

    Definition at line 290 of file scalapack.cc.

    @@ -1058,9 +1058,9 @@
Transposing assignment: $\mathbf{A} = \mathbf{B}^T$

The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

The following alignment conditions have to be fulfilled: $MB_A=NB_B$ and $NB_A=MB_B$.

    Definition at line 980 of file scalapack.cc.

@@ -1098,13 +1098,13 @@

transpose_B   Block Sizes                Operation
false         $MB_A=MB_B$, $NB_A=NB_B$   $\mathbf{A} = a \mathbf{A} + b \mathbf{B}$
true          $MB_A=NB_B$, $NB_A=MB_B$   $\mathbf{A} = a \mathbf{A} + b \mathbf{B}^T$

The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

    Definition at line 990 of file scalapack.cc.

@@ -1127,9 +1127,9 @@

Matrix-addition: $\mathbf{A} = \mathbf{A} + b\, \mathbf{B}$

The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

The following alignment conditions have to be fulfilled: $MB_A=MB_B$ and $NB_A=NB_B$.

    Definition at line 1046 of file scalapack.cc.

@@ -1152,9 +1152,9 @@

Matrix-addition: $\mathbf{A} = \mathbf{A} + b\, \mathbf{B}^T$

The matrices $\mathbf{A}$ and $\mathbf{B}$ must have the same process grid.

The following alignment conditions have to be fulfilled: $MB_A=NB_B$ and $NB_A=MB_B$.

    Definition at line 1056 of file scalapack.cc.

@@ -1203,24 +1203,24 @@

transpose_A  transpose_B  Block Sizes                              Operation
false        false        $MB_A=MB_C$, $NB_A=MB_B$, $NB_B=NB_C$    $\mathbf{C} = b \mathbf{A} \cdot \mathbf{B} + c \mathbf{C}$
false        true         $MB_A=MB_C$, $NB_A=NB_B$, $MB_B=NB_C$    $\mathbf{C} = b \mathbf{A} \cdot \mathbf{B}^T + c \mathbf{C}$
true         false        $MB_A=MB_B$, $NB_A=MB_C$, $NB_B=NB_C$    $\mathbf{C} = b \mathbf{A}^T \cdot \mathbf{B} + c \mathbf{C}$
true         true         $MB_A=NB_B$, $NB_A=MB_C$, $MB_B=NB_C$    $\mathbf{C} = b \mathbf{A}^T \cdot \mathbf{B}^T + c \mathbf{C}$

It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

The matrices $\mathbf{A}$, $\mathbf{B}$ and $\mathbf{C}$ must have the same process grid.

    Definition at line 1066 of file scalapack.cc.

@@ -1249,11 +1249,11 @@

    Matrix-matrix-multiplication.

The optional parameter adding determines whether the result is stored in $\mathbf{C}$ or added to $\mathbf{C}$:

if (adding) $\mathbf{C} = \mathbf{C} + \mathbf{A} \cdot \mathbf{B}$

else $\mathbf{C} = \mathbf{A} \cdot \mathbf{B}$

It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

The following alignment conditions have to be fulfilled: $MB_A=MB_C$, $NB_A=MB_B$ and $NB_B=NB_C$.

    Definition at line 1183 of file scalapack.cc.
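
A hedged usage sketch: with one uniform block size and a single shared process grid, the alignment conditions above are satisfied automatically. The grid and the dimensions n, k, m are placeholders:

#include <deal.II/base/process_grid.h>
#include <deal.II/lac/scalapack.h>

#include <memory>

using namespace dealii;

void multiply(const std::shared_ptr<const Utilities::MPI::ProcessGrid> &grid,
              const unsigned int n, const unsigned int k, const unsigned int m)
{
  const unsigned int nb = 32; // one uniform block size keeps MB_A=MB_C,
                              // NB_A=MB_B and NB_B=NB_C satisfied
  ScaLAPACKMatrix<double> A(n, k, grid, nb, nb);
  ScaLAPACKMatrix<double> B(k, m, grid, nb, nb);
  ScaLAPACKMatrix<double> C(n, m, grid, nb, nb);
  // ... fill A and B ...
  A.mmult(C, B);                  // C  = A * B
  A.mmult(C, B, /*adding=*/true); // C += A * B
}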

@@ -1282,11 +1282,11 @@

    Matrix-matrix-multiplication using transpose of $\mathbf{A}$.

The optional parameter adding determines whether the result is stored in $\mathbf{C}$ or added to $\mathbf{C}$:

if (adding) $\mathbf{C} = \mathbf{C} + \mathbf{A}^T \cdot \mathbf{B}$

else $\mathbf{C} = \mathbf{A}^T \cdot \mathbf{B}$

It is assumed that $\mathbf{A}$ and $\mathbf{B}$ have compatible sizes and that $\mathbf{C}$ already has the right size.

The following alignment conditions have to be fulfilled: $MB_A=MB_B$, $NB_A=MB_C$ and $NB_B=NB_C$.

    Definition at line 1197 of file scalapack.cc.

    @@ -1314,12 +1314,12 @@ /usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 2024-11-15 06:44:21.783603283 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classScalarFunctionFromFunctionObject.html 2024-11-15 06:44:21.783603283 +0000 @@ -272,7 +272,7 @@
    Vector<double> solution_1d;
We will denote this solution function described by this DoFHandler and vector object by $u_h(x)$ where $x$ is a vector with just one component, and consequently is not shown in boldface. Then assume that we want this $u_h(x)$ to be used as a boundary condition for a 2d problem at the line $y=0$. Let's say that this line corresponds to boundary indicator 123. If we say that the 2d problem is associated with

DoFHandler<2> dof_handler_2d;

    then in order to evaluate the boundary conditions for this 2d problem, we would want to call VectorTools::interpolate_boundary_values() via

AffineConstraints<double> boundary_values_2d;
VectorTools::interpolate_boundary_values (dof_handler_2d,
                                          123,
                                          boundary_values_function, // defined below
                                          boundary_values_2d);
The question here is what to use as the Function object that can be passed as third argument. It needs to be a Function<2> object, i.e., it receives a 2d input point and is supposed to return the value at that point. What we want it to do is to just take the $x$ component of the input point and evaluate the 1d solution at that point, knowing that at the boundary with indicator 123, the $y$ component of the input point must be zero. This all can be achieved via the following function object:

Functions::FEFieldFunction<1> solution_1d_as_function_object (dof_handler_1d, solution_1d);
auto boundary_evaluator
  = [&] (const Point<2> &p)
    { return solution_1d_as_function_object.value (Point<1>(p[0])); };
ScalarFunctionFromFunctionObject<2> boundary_values_function (boundary_evaluator);
/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html 2024-11-15 06:44:21.811603533 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverBFGS.html 2024-11-15 06:44:21.811603533 +0000 @@ -379,8 +379,8 @@

    starting from initial state x.

The function compute takes two arguments indicating the values of $x$ and of the gradient $g=\nabla f(\mathbf x)=\frac{\partial f}{\partial \mathbf x}$. When called, it needs to update the gradient $g$ at the given location $x$ and return the value of the function being minimized, i.e., $f(\mathbf x)$.
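
A minimal sketch under the assumption that the callback signature is value = compute(x, g): minimize $f(\mathbf x)=\frac 12 \|\mathbf x\|^2$, whose gradient is $\mathbf x$ itself:

#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/vector.h>
#include <deal.II/optimization/solver_bfgs.h>

using namespace dealii;

void minimize_simple_quadratic()
{
  SolverControl control(100, 1e-10);
  SolverBFGS<Vector<double>> solver(control);

  Vector<double> x(10);
  for (auto &entry : x)
    entry = 1.0; // starting guess

  solver.solve(
    [](const Vector<double> &x, Vector<double> &g) -> double {
      g = x;                // gradient of 1/2 |x|^2 is x itself
      return 0.5 * (x * x); // function value f(x)
    },
    x);
}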

    @@ -401,7 +401,7 @@

    Connect a slot to perform a custom line-search.

Given the value of function f, the current value of unknown x, the gradient g and the search direction p, return the size $\alpha$ of the step $x \leftarrow x + \alpha p$, and update x, g and f accordingly.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSolverCG.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverCG.html 2024-11-15 06:44:21.843603819 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverCG.html 2024-11-15 06:44:21.843603819 +0000 @@ -1047,7 +1047,7 @@
Flag to indicate whether the classical Fletcher–Reeves update formula for the parameter $\beta_k$ (standard CG algorithm, minimal storage needs) or the flexible conjugate gradient method with Polak-Ribiere formula for $\beta_k$ should be used. This base class implementation of SolverCG will always use the former method, whereas the derived class SolverFlexibleCG will use the latter.

    Definition at line 326 of file solver_cg.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html 2024-11-15 06:44:21.871604069 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFIRE.html 2024-11-15 06:44:21.871604069 +0000 @@ -209,27 +209,27 @@

    Detailed Description

    template<typename VectorType = Vector<double>>
class SolverFIRE< VectorType >

FIRE (Fast Inertial Relaxation Engine) for minimization of a (potentially non-linear) objective function $E(\mathbf x)$, where $\mathbf x$ is a vector of $n$ variables ($n$ is the number of variables of the objective function). Like all other solver classes, it can work on any kind of vector and matrix as long as they satisfy certain requirements (for the requirements on matrices and vectors in order to work with this class, see the documentation of the Solver base class). The type of the solution vector must be passed as template argument, and defaults to Vector<double>.

    FIRE is a damped dynamics method described in Structural Relaxation Made Simple by Bitzek et al. 2006, typically used to find stable equilibrium configurations of atomistic systems in computational material science. Starting from a given initial configuration of the atomistic system, the algorithm relies on inertia to obtain (nearest) configuration with least potential energy.

    Notation:

Given initial values for $\Delta t$, $\alpha = \alpha_0$, $\epsilon$, $\mathbf x = \mathbf x_0$ and $\mathbf v = \mathbf 0$ along with a given mass matrix $\mathbf M$, the FIRE algorithm is as follows:

  1. Calculate $\mathbf g = \nabla E(\mathbf x)$ and check for convergence ( $\mathbf g \cdot \mathbf g < \epsilon^2 $).
  2. Update $\mathbf x$ and $\mathbf v$ using a simple (forward) Euler integration step,
     $\mathbf x = \mathbf x + \Delta t \mathbf v$,
     $\mathbf v = \mathbf v + \Delta t \mathbf M^{-1} \cdot \mathbf g$.
  3. Calculate $p = \mathbf g \cdot \mathbf v$.
  4. Set $\mathbf v = (1-\alpha) \mathbf v + \alpha \frac{|\mathbf v|}{|\mathbf g|} \mathbf g$.
  5. If $p<0$ and the number of steps since $p$ was last negative is larger than a certain value, then increase the time step $\Delta t$ and decrease $\alpha$.
  6. If $p>0$, then decrease the time step, freeze the system, i.e., set $\mathbf v = \mathbf 0$, and reset $\alpha = \alpha_0$.
  7. Return to 1.

      Also see Energy-Minimization in Atomic-to-Continuum Scale-Bridging Methods by Eidel et al. 2011.

@@ -419,7 +419,7 @@

Solve for x that minimizes $E(\mathbf x)$ for the special case when $E(\mathbf x) = \frac{1}{2} \mathbf x^{T} \mathbf A \mathbf x - \mathbf x^{T} \mathbf b$.
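
A hedged sketch of this special-case overload (the constructor arguments and the PreconditionIdentity choice are assumptions; A, x and b stand for an assembled SPD matrix and compatible vectors):

#include <deal.II/lac/precondition.h>
#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/solver_fire.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

// Minimize E(x) = 1/2 x^T A x - x^T b, i.e., solve A x = b:
void fire_quadratic(const SparseMatrix<double> &A,
                    Vector<double>             &x,
                    const Vector<double>       &b)
{
  SolverControl              control(10000, 1e-8);
  SolverFIRE<Vector<double>> fire(control);
  fire.solve(A, x, b, PreconditionIdentity());
}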

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFlexibleCG.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFlexibleCG.html 2024-11-15 06:44:21.899604319 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverFlexibleCG.html 2024-11-15 06:44:21.899604319 +0000 @@ -230,11 +230,11 @@

    Detailed Description

    template<typename VectorType = Vector<double>>
class SolverFlexibleCG< VectorType >

This class implements a flexible variant of the conjugate gradient method, which is based on a different formula to compute $\beta_k$ in the process of constructing a new search direction that is A-orthogonal against the previous one. Rather than using the Fletcher–Reeves update formula with $\beta_k = \frac{\mathbf{r}^T_{k+1} \mathbf{z}_{k+1}}{\mathbf{r}^T_{k} \mathbf{z}_{k}}$ for computing the new search direction (here $\mathbf{r}_{k+1}$ is the residual in step $k+1$ and $\mathbf{z}_{k+1} = P^{-1} \mathbf{r}_{k+1}$) as in the classical conjugate gradient algorithm, this class selects the Polak-Ribiere formula $\beta_k = \frac{\mathbf{r}^T_{k+1} \left(\mathbf{z}_{k+1} - \mathbf{z}_{k}\right)}{\mathbf{r}^T_{k} \mathbf{z}_{k}}$. The additional term $\mathbf{r}^T_{k+1} \mathbf{z}_{k}$ is zero for linear symmetric-positive definite preconditioners due to the construction of the search directions, so the behavior of SolverFlexibleCG is equivalent for those kinds of situations and merely increases costs by requiring an additional stored vector and associated vector operations. While there are no theoretical guarantees for convergence as in the classical CG algorithm, the current class has been documented to be much more robust for variable preconditioners (e.g., involving some iterative inverse that is not fully converged) or a preconditioner with some slight non-symmetry (like weighted Schwarz methods), which results from the local optimality of the search direction with at least as good progress as the locally optimal steepest descent method.

    Definition at line 358 of file solver_cg.h.
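
A hedged usage sketch with a simple Jacobi preconditioner; in practice the class pays off with variable preconditioners, as described above:

#include <deal.II/lac/precondition.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

void solve_spd(const SparseMatrix<double> &A,
               Vector<double>             &x,
               const Vector<double>       &b)
{
  SolverControl                    control(1000, 1e-12 * b.l2_norm());
  SolverFlexibleCG<Vector<double>> solver(control);

  PreconditionJacobi<SparseMatrix<double>> preconditioner;
  preconditioner.initialize(A);

  solver.solve(A, x, b, preconditioner);
}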

    Member Typedef Documentation

    @@ -1008,7 +1008,7 @@
Flag to indicate whether the classical Fletcher–Reeves update formula for the parameter $\beta_k$ (standard CG algorithm, minimal storage needs) or the flexible conjugate gradient method with Polak-Ribiere formula for $\beta_k$ should be used. This base class implementation of SolverCG will always use the former method, whereas the derived class SolverFlexibleCG will use the latter.

    Definition at line 326 of file solver_cg.h.

/usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html 2024-11-15 06:44:21.927604569 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSolverRichardson.html 2024-11-15 06:44:21.927604569 +0000 @@ -440,7 +440,7 @@

Solve $A^T x = b$ for $x$.
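
A hedged fragment, assuming the member documented here is Tsolve(), the transposed counterpart of solve():

#include <deal.II/lac/precondition.h>
#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/solver_richardson.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

void solve_transposed(const SparseMatrix<double> &A,
                      Vector<double>             &x,
                      const Vector<double>       &b)
{
  SolverControl                    control(1000, 1e-10);
  SolverRichardson<Vector<double>> richardson(control);
  richardson.Tsolve(A, x, b, PreconditionIdentity()); // solves A^T x = b
}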

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html 2024-11-15 06:44:21.959604855 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseDirectUMFPACK.html 2024-11-15 06:44:21.959604855 +0000 @@ -590,7 +590,7 @@

    The solution will be returned in place of the right hand side vector.

    Parameters
[in,out]  rhs_and_solution  A vector that contains the right hand side $b$ of a linear system $Ax=b$ upon calling this function, and that contains the solution $x$ of the linear system after calling this function.
[in]  transpose  If set to true, this function solves the linear system $A^T x = b$ instead of $Ax=b$.
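
A minimal sketch of the in-place variant described above; A and rhs_and_solution are assumed to be an assembled matrix and its right hand side:

#include <deal.II/lac/sparse_direct.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

void direct_solve(const SparseMatrix<double> &A, Vector<double> &rhs_and_solution)
{
  SparseDirectUMFPACK solver;
  solver.initialize(A);           // compute the LU factorization of A
  solver.solve(rhs_and_solution); // b is overwritten by x = A^{-1} b
  // solver.solve(rhs_and_solution, /*transpose=*/true); would solve A^T x = b
}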
    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html 2024-11-15 06:44:22.023605427 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseILU.html 2024-11-15 06:44:22.023605427 +0000 @@ -1822,7 +1822,7 @@
Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtained by SparsityPattern::symmetrize().

    @@ -2074,7 +2074,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2107,7 +2107,7 @@
Compute the matrix scalar product $\left(u,Mv\right)$.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
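
As a short fragment (M, u and v are assumed to be a compatible SparseMatrix<double> and Vector<double> objects):

const double v_norm_sq = M.matrix_norm_square(v);        // returns (v, M v)
const double u_M_v     = M.matrix_scalar_product(u, v);  // returns (u, M v)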
    @@ -2184,7 +2184,7 @@

    Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

    As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.
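
A hedged sketch of the default rebuild_sparsity_pattern == true path; the initial one-entry-per-row pattern is only a placeholder that mmult() then resets:

#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/sparsity_pattern.h>

using namespace dealii;

void multiply(const SparseMatrix<double> &A,
              const SparseMatrix<double> &B)
{
  SparsityPattern sp_C(A.m(), B.n(), /*max_entries_per_row=*/1);
  sp_C.compress();
  SparseMatrix<double> C(sp_C); // C must already own *some* pattern ...
  A.mmult(C, B);                // ... which mmult() then resets to fit A*B
  // sp_C now describes the product's sparsity; any other matrix still
  // attached to sp_C would be invalidated, as explained above.
}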

    @@ -2255,8 +2255,8 @@
Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\ columns\ }j}\sum_{\mathrm{all\ rows\ } i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm that is compatible to the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    @@ -2284,8 +2284,8 @@
Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm that is compatible to the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html 2024-11-15 06:44:22.087605998 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseLUDecomposition.html 2024-11-15 06:44:22.087605998 +0000 @@ -1625,7 +1625,7 @@
Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtained by SparsityPattern::symmetrize().

    @@ -1970,7 +1970,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
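For instance (an illustrative sketch; mass_matrix and the nodal vector v are assumed to exist), the $L_2$ norm of a finite element function can then be computed as:

  #include <deal.II/lac/sparse_matrix.h>
  #include <deal.II/lac/vector.h>
  #include <cmath>

  double l2_norm(const dealii::SparseMatrix<double> &mass_matrix,
                 const dealii::Vector<double>       &v)
  {
    return std::sqrt(mass_matrix.matrix_norm_square(v)); // sqrt((v, M v))
  }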
    @@ -2003,7 +2003,7 @@
Compute the matrix scalar product $\left(u,Mv\right)$.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2080,7 +2080,7 @@

    Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

    As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

@@ -2151,8 +2151,8 @@

Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\ columns\ }j}\sum_{\mathrm{all\ rows\ }i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm compatible with the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

@@ -2180,8 +2180,8 @@

Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm compatible with the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)
    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html 2024-11-15 06:44:22.151606570 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMIC.html 2024-11-15 06:44:22.155606606 +0000 @@ -401,8 +401,8 @@
    template<typename number>
    class SparseMIC< number >

    Implementation of the Modified Incomplete Cholesky (MIC(0)) preconditioner for symmetric matrices. This class conforms to the state and usage specification in SparseLUDecomposition.

    The decomposition

Let a symmetric, positive-definite, sparse matrix $A$ be in the form $A = D - L - L^T$, where $D$ is the diagonal part of $A$ and $-L$ is a strictly lower triangular matrix. The MIC(0) decomposition of the matrix $A$ is defined by $B = (X-L)X^{-1}(X-L^T)$, where $X$ is a diagonal matrix defined by the condition $\text{rowsum}(A) = \text{rowsum}(B)$.

    Definition at line 45 of file sparse_mic.h.
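Typical usage might look like the following sketch (not from the compared files; it assumes an already-assembled symmetric positive-definite system A x = b and uses the initialize/solve interface described in SparseLUDecomposition):

  #include <deal.II/lac/solver_cg.h>
  #include <deal.II/lac/solver_control.h>
  #include <deal.II/lac/sparse_matrix.h>
  #include <deal.II/lac/sparse_mic.h>
  #include <deal.II/lac/vector.h>

  void solve(const dealii::SparseMatrix<double> &A,
             dealii::Vector<double>             &x,
             const dealii::Vector<double>       &b)
  {
    dealii::SparseMIC<double> mic;
    mic.initialize(A, dealii::SparseMIC<double>::AdditionalData());

    dealii::SolverControl control(1000, 1e-12 * b.l2_norm());
    dealii::SolverCG<dealii::Vector<double>> cg(control);
    cg.solve(A, x, b, mic);   // mic acts as the preconditioner
  }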

    Member Typedef Documentation

    @@ -1891,7 +1891,7 @@
Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtained by SparsityPattern::symmetrize().

    @@ -2143,7 +2143,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2176,7 +2176,7 @@
Compute the matrix scalar product $\left(u,Mv\right)$.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
    @@ -2253,7 +2253,7 @@

    Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

    As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

@@ -2324,8 +2324,8 @@

Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\ columns\ }j}\sum_{\mathrm{all\ rows\ }i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm compatible with the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

@@ -2353,8 +2353,8 @@

Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm compatible with the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html 2024-11-15 06:44:22.219607177 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrix.html 2024-11-15 06:44:22.219607177 +0000 @@ -1465,7 +1465,7 @@
Symmetrize the matrix by forming the mean value between the existing matrix and its transpose, $A = \frac 12(A+A^T)$.

This operation assumes that the underlying sparsity pattern represents a symmetric object. If this is not the case, then the result of this operation will not be a symmetric matrix, since it only explicitly symmetrizes by looping over the lower left triangular part for efficiency reasons; if there are entries in the upper right triangle, then these elements are missed in the symmetrization. Symmetrization of the sparsity pattern can be obtained by SparsityPattern::symmetrize().

    @@ -1825,7 +1825,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation, and for the result to actually be a norm it also needs to be either real symmetric or complex hermitian.

    The underlying template types of both this matrix and the given vector should either both be real or complex-valued, but not mixed, for this function to make sense.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
@@ -1851,7 +1851,7 @@ const Vector< somenumber > & v

Compute the matrix scalar product $\left(u,Mv\right)$.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile).
@@ -1914,7 +1914,7 @@ const bool rebuild_sparsity_pattern = true

    Perform the matrix-matrix multiplication C = A * B, or, if an optional vector argument is given, C = A * diag(V) * B, where diag(V) defines a diagonal matrix with the vector entries.

    This function assumes that the calling matrix A and the argument B have compatible sizes. By default, the output matrix C will be resized appropriately.

By default, i.e., if the optional argument rebuild_sparsity_pattern is true, the sparsity pattern of the matrix C will be changed to ensure that all entries that result from the product $AB$ can be stored in $C$. This is an expensive operation, and if there is a way to predict the sparsity pattern up front, you should probably build it yourself before calling this function with false as last argument. In this case, the rebuilding of the sparsity pattern is bypassed.

    When setting rebuild_sparsity_pattern to true (i.e., leaving it at the default value), it is important to realize that the matrix C passed as first argument still has to be initialized with a sparsity pattern (either at the time of creation of the SparseMatrix object, or via the SparseMatrix::reinit() function). This is because we could create a sparsity pattern inside the current function, and then associate C with it, but there would be no way to transfer ownership of this sparsity pattern to anyone once the current function finishes. Consequently, the function requires that C be already associated with a sparsity pattern object, and this object is then reset to fit the product of A and B.

    As a consequence of this, however, it is also important to realize that the sparsity pattern of C is modified and that this would render invalid all other SparseMatrix objects that happen to also use that sparsity pattern object.

@@ -1970,8 +1970,8 @@

Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\ columns\ }j}\sum_{\mathrm{all\ rows\ }i} |M_{ij}|$ (max. sum of columns). This is the natural matrix norm compatible with the $l_1$-norm for vectors, i.e. $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

@@ -1991,8 +1991,8 @@

Return the $l_\infty$-norm of the matrix, that is $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j} |M_{ij}|$ (max. sum of rows). This is the natural matrix norm compatible with the $l_\infty$-norm of vectors, i.e. $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)

/usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html 2024-11-15 06:44:22.271607642 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixEZ.html 2024-11-15 06:44:22.271607642 +0000 @@ -1219,7 +1219,7 @@ const Vector< somenumber > & src

Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

@@ -1242,7 +1242,7 @@ const Vector< somenumber > & src

Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult but takes the transposed matrix.

@@ -1265,7 +1265,7 @@ const Vector< somenumber > & src

Adding matrix-vector multiplication: add $M*src$ to $dst$ with $M$ being this matrix.

@@ -1288,7 +1288,7 @@ const Vector< somenumber > & src

Adding matrix-vector multiplication: add $M^T*src$ to $dst$ with $M$ being this matrix. This function does the same as vmult_add but takes the transposed matrix.
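The four variants side by side (a sketch under the assumption that M is square, so that all vectors have matching sizes):

  #include <deal.II/lac/sparse_matrix_ez.h>
  #include <deal.II/lac/vector.h>

  void apply_all(const dealii::SparseMatrixEZ<double> &M,
                 const dealii::Vector<double>         &src,
                 dealii::Vector<double>               &dst)
  {
    M.vmult(dst, src);        // dst  = M * src
    M.vmult_add(dst, src);    // dst += M * src
    M.Tvmult(dst, src);       // dst  = M^T * src (overwrites dst)
    M.Tvmult_add(dst, src);   // dst += M^T * src
  }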

@@ -1397,7 +1397,7 @@ const number om = 1.

Apply SOR preconditioning matrix to src. The result of this method is $dst = (om D - L)^{-1} src$.

@@ -1425,7 +1425,7 @@ const number om = 1.

Apply transpose SOR preconditioning matrix to src. The result of this method is $dst = (om D - U)^{-1} src$.
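As a sketch (illustrative; om is a hypothetical relaxation parameter, and M, src, dst are assumed compatible):

  #include <deal.II/lac/sparse_matrix_ez.h>
  #include <deal.II/lac/vector.h>

  void apply_sor_step(const dealii::SparseMatrixEZ<double> &M,
                      const dealii::Vector<double>         &src,
                      dealii::Vector<double>               &dst,
                      const double                          om)
  {
    M.precondition_SOR(dst, src, om);     // dst = (om D - L)^{-1} src
    // M.precondition_TSOR(dst, src, om)  // would apply the transposed variant
  }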

    @@ -1460,7 +1460,7 @@
Add the matrix A conjugated by B, that is, $B A B^T$ to this object. If the parameter transpose is true, compute $B^T A B$.

    This function requires that B has a const_iterator traversing all matrix entries and that A has a function el(i,j) for access to a specific entry.

    Definition at line 1463 of file sparse_matrix_ez.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html 2024-11-15 06:44:22.291607821 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparseMatrixIterators_1_1Iterator.html 2024-11-15 06:44:22.291607821 +0000 @@ -156,7 +156,7 @@

    The typical use for these iterators is to iterate over the elements of a sparse matrix or over the elements of individual rows. Note that there is no guarantee that the elements of a row are actually traversed in an order in which columns monotonically increase. See the documentation of the SparsityPattern class for more information.

    The first template argument denotes the underlying numeric type, the second the constness of the matrix.

    Since there is a specialization of this class for Constness=false, this class is for iterators to constant matrices.

Note
This class operates directly on the internal data structures of the SparsityPattern and SparseMatrix classes. As a consequence, some operations are cheap and some are not. In particular, it is cheap to access the column index and the value of an entry pointed to. On the other hand, it is expensive to access the row index (this requires $O(\log(N))$ operations for a matrix with $N$ rows). As a consequence, when you design algorithms that use these iterators, it is common practice to not loop over all elements of a sparse matrix at once, but to have an outer loop over all rows and within this loop iterate over the elements of this row. This way, you only ever need to dereference the iterator to obtain the column indices and values, whereas the (expensive) lookup of the row index can be avoided by using the loop index instead.

    Definition at line 346 of file sparse_matrix.h.
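The row-wise loop described in the note might look like this sketch (A is an assumed, already-filled SparseMatrix):

  #include <deal.II/lac/sparse_matrix.h>

  double sum_of_squares(const dealii::SparseMatrix<double> &A)
  {
    double sum = 0.;
    for (dealii::SparseMatrix<double>::size_type row = 0; row < A.m(); ++row)
      // Within a row, column() and value() are cheap; the row index comes
      // from the loop variable, so the expensive it->row() is never called.
      for (auto it = A.begin(row); it != A.end(row); ++it)
        sum += it->value() * it->value();
    return sum;
  }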

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html 2024-11-15 06:44:22.335608213 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPattern.html 2024-11-15 06:44:22.335608213 +0000 @@ -1174,7 +1174,7 @@
Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$, a diagonal matrix has bandwidth 0, and there are at most $2q+1$ entries per row if the bandwidth is $q$. The returned quantity is sometimes called "half bandwidth" in the literature.

    Definition at line 673 of file sparsity_pattern.cc.
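For example (a sketch with a hypothetical size n): a tridiagonal pattern has bandwidth $q=1$ and hence at most $2q+1=3$ entries per row.

  #include <deal.II/lac/dynamic_sparsity_pattern.h>
  #include <deal.II/lac/sparsity_pattern.h>
  #include <algorithm>

  dealii::SparsityPattern::size_type
  tridiagonal_bandwidth(const unsigned int n)
  {
    dealii::DynamicSparsityPattern dsp(n, n);
    for (unsigned int i = 0; i < n; ++i)
      for (unsigned int j = (i > 0 ? i - 1 : 0);
           j <= std::min(i + 1, n - 1); ++j)
        dsp.add(i, j);                  // entries on the three diagonals
    dealii::SparsityPattern sp;
    sp.copy_from(dsp);
    return sp.bandwidth();              // == 1 for n >= 2
  }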

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 2024-11-15 06:44:22.363608463 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSparsityPatternIterators_1_1Iterator.html 2024-11-15 06:44:22.367608499 +0000 @@ -177,7 +177,7 @@

    Detailed Description

    An iterator class for walking over the elements of a sparsity pattern.

    The typical use for these iterators is to iterate over the elements of a sparsity pattern (or, since they also serve as the basis for iterating over the elements of an associated matrix, over the elements of a sparse matrix), or over the elements of individual rows. There is no guarantee that the elements of a row are actually traversed in an order in which column numbers monotonically increase. See the documentation of the SparsityPattern class for more information.

Note
This class operates directly on the internal data structures of the SparsityPattern class. As a consequence, some operations are cheap and some are not. In particular, it is cheap to access the column index of the sparsity pattern entry pointed to. On the other hand, it is expensive to access the row index (this requires $O(\log(N))$ operations for a matrix with $N$ rows). As a consequence, when you design algorithms that use these iterators, it is common practice to not loop over all elements of a sparsity pattern at once, but to have an outer loop over all rows and within this loop iterate over the elements of this row. This way, you only ever need to dereference the iterator to obtain the column indices, whereas the (expensive) lookup of the row index can be avoided by using the loop index instead.

    Definition at line 279 of file sparsity_pattern.h.

    Member Typedef Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html 2024-11-15 06:44:22.399608785 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSphericalManifold.html 2024-11-15 06:44:22.399608785 +0000 @@ -219,20 +219,20 @@ class SphericalManifold< dim, spacedim >

    Manifold description for a spherical space coordinate system.

You can use this Manifold object to describe any sphere, circle, hypersphere or hyperdisc in two or three dimensions. This manifold can be used as a co-dimension one manifold descriptor of a spherical surface embedded in a higher dimensional space, or as a co-dimension zero manifold descriptor for a body with positive volume, provided that the center of the spherical space is excluded from the domain. An example for the use of this function would be in the description of a hyper-shell or hyper-ball geometry, for example after creating a coarse mesh using GridGenerator::hyper_ball(). (However, it is worth mentioning that generating a good mesh for a disk or ball is complicated and requires additional steps. See the "Possibilities for extensions" section of step-6 for an extensive discussion of how one would construct such meshes and what one needs to do for it.)

    The two template arguments match the meaning of the two template arguments in Triangulation<dim, spacedim>, however this Manifold can be used to describe both thin and thick objects, and the behavior is identical when dim <= spacedim, i.e., the functionality of SphericalManifold<2,3> is identical to SphericalManifold<3,3>.

While PolarManifold reflects the usual notion of polar coordinates, it may not be suitable for domains that contain either the north or south poles. Consider for instance the pair of points $x_1=(1,\pi/3,0)$ and $x_2=(1,\pi/3,\pi)$ in polar coordinates (lying on the surface of a sphere with radius one, on a parallel at height $\pi/3$). In this case connecting the points with a straight line in polar coordinates would take the long road around the globe, without passing through the north pole.

These two points would be connected (using a PolarManifold) by the curve

\begin{align*}
  s: [0,1]  & \rightarrow &  \mathbb S^3 \\
          t & \mapsto     &  (1,\pi/3,0) + (0,0,t\pi)
\end{align*}

    This curve is not a geodesic on the sphere, and it is not how we would connect those two points. A better curve, would be the one passing through the North pole:

\[
  s(t) = x_1 \cos(\alpha(t)) + \kappa \times x_1 \sin(\alpha(t)) +
  \kappa ( \kappa \cdot x_1) (1-\cos(\alpha(t))).
\]

where $\kappa = \frac{x_1 \times x_2}{\Vert x_1 \times x_2 \Vert}$ and $\alpha(t) = t \cdot \arccos(x_1 \cdot x_2)$ for $t\in[0,1]$. Indeed, this is a geodesic, and it is the natural choice when connecting points on the surface of the sphere. In the examples above, the PolarManifold class implements the first way of connecting two points on the surface of a sphere, while SphericalManifold implements the second way, i.e., this Manifold connects points using geodesics. If more than two points are involved through a SphericalManifold::get_new_points() call, a so-called spherical average is used where the final point minimizes the weighted distance to all other points via geodesics.

    In particular, this class implements a Manifold that joins any two points in space by first projecting them onto the surface of a sphere with unit radius, then connecting them with a geodesic, and finally rescaling the final radius so that the resulting one is the weighted average of the starting radii. This Manifold is identical to PolarManifold in dimension two, while for dimension three it returns points that are more uniformly distributed on the sphere, and it is invariant with respect to rotations of the coordinate system, therefore avoiding the problems that PolarManifold has at the poles. Notice, in particular, that computing tangent vectors at the poles with a PolarManifold is not well defined, while it is perfectly fine with this class.

    For mathematical reasons, it is impossible to construct a unique map of a sphere using only geodesic curves, and therefore, using this class with MappingManifold is discouraged. If you use this Manifold to describe the geometry of a sphere, you should use MappingQ as the underlying mapping, and not MappingManifold.

    This Manifold can be used only on geometries where a ball with finite radius is removed from the center. Indeed, the center is a singular point for this manifold, and if you try to connect two points across the center, they would travel on spherical coordinates, avoiding the center.
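A usage sketch consistent with these restrictions (illustrative; the shell radii are arbitrary): the center lies outside the domain of a hyper-shell, so SphericalManifold is safe to attach there.

  #include <deal.II/grid/grid_generator.h>
  #include <deal.II/grid/manifold_lib.h>
  #include <deal.II/grid/tria.h>

  void make_shell_mesh()
  {
    dealii::Triangulation<3> tria;
    dealii::GridGenerator::hyper_shell(tria, dealii::Point<3>(), 0.5, 1.0);
    tria.set_all_manifold_ids(0);
    tria.set_manifold(0, dealii::SphericalManifold<3>(dealii::Point<3>()));
    tria.refine_global(2);   // new vertices land on concentric spheres
  }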

    /usr/share/doc/packages/dealii/doxygen/deal.II/classStridedArrayView.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classStridedArrayView.html 2024-11-15 06:44:22.419608964 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classStridedArrayView.html 2024-11-15 06:44:22.419608964 +0000 @@ -287,7 +287,7 @@
Return a reference to the $i$th element of the range represented by the current object.

    This function is marked as const because it does not change the view object. It may however return a reference to a non-const memory location depending on whether the template type of the class is const or not.

    This function is only allowed to be called if the underlying data is indeed stored in CPU memory.

/usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html 2024-11-15 06:44:22.491609607 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classSymmetricTensor.html 2024-11-15 06:44:22.495609643 +0000 @@ -318,7 +318,7 @@

    Detailed Description

    template<int rank_, int dim, typename Number>
class SymmetricTensor< rank_, dim, Number >

Provide a class that stores symmetric tensors of rank 2,4,... efficiently, i.e. only store those off-diagonal elements of the full tensor that are not redundant. For example, for symmetric $2\times 2$ tensors, this would be the elements 11, 22, and 12, while the element 21 is equal to the 12 element. Within this documentation, second order symmetric tensors are denoted as bold-faced upper-case Latin letters such as $\mathbf A, \mathbf B, \dots$ or bold-faced Greek letters such as $\boldsymbol{\varepsilon}$, $\boldsymbol{\sigma}$. The Cartesian coordinates of a second-order tensor such as $\mathbf A$ are represented as $A_{ij}$ where $i,j$ are indices ranging from 0 to dim-1.

    Using this class for symmetric tensors of rank 2 has advantages over matrices in many cases since the dimension is known to the compiler as well as the location of the data. It is therefore possible to produce far more efficient code than for matrices with runtime-dependent dimension. It is also more efficient than using the more general Tensor class, since fewer elements are stored, and the class automatically makes sure that the tensor represents a symmetric object.

    For tensors of higher rank, the savings in storage are even higher. For example for the $3 \times 3 \times 3 \times 3$ tensors of rank 4, only 36 instead of the full 81 entries have to be stored. These rank 4 tensors are denoted by blackboard-style upper-case Latin letters such as $\mathbb A$ with components $\mathcal{A}_{ijkl}$.

While the definition of a symmetric rank-2 tensor is obvious, tensors of rank 4 are considered symmetric if they are operators mapping symmetric rank-2 tensors onto symmetric rank-2 tensors. This so-called minor symmetry of the rank 4 tensor requires that for every set of four indices $i, j, k, l$, the identity $\mathcal{C}_{ijkl} = \mathcal{C}_{jikl} = \mathcal{C}_{ijlk}$ holds.

@@ -647,7 +647,7 @@

This operator assigns a scalar to a tensor. To avoid confusion with what exactly it means to assign a scalar value to a tensor, zero is the only value allowed for d, allowing the intuitive notation $\mathbf A = 0$ to reset all elements of the tensor to zero.

    @@ -909,8 +909,8 @@
Double contraction product between the present symmetric tensor and a tensor of rank 2. For example, if the present object is the symmetric rank-2 tensor $\mathbf{A}$ and it is multiplied by another symmetric rank-2 tensor $\mathbf{B}$, then the result is the scalar-product double contraction $\mathbf A : \mathbf B = \sum_{i,j} A_{ij} B_{ij}$. In this case, the return value evaluates to a single scalar. While it is possible to define other scalar products (and associated induced norms), this one seems to be the most appropriate one.

If the present object is a rank-4 tensor such as $\mathbb A$, then the result is a rank-2 tensor $\mathbf C = \mathbb A : \mathbf B$, i.e., the operation contracts over the last two indices of the present object and the indices of the argument, and the result is a tensor of rank 2 ( $C_{ij} = \sum_{k,l} \mathcal{A}_{ijkl} B_{kl}$).

    Note that the multiplication operator for symmetric tensors is defined to be a double contraction over two indices, while it is defined as a single contraction over only one index for regular Tensor objects. For symmetric tensors it therefore acts in a way that is commonly denoted by a "colon multiplication" in the mathematical literature (the two dots of the colon suggesting that it is a contraction over two indices), which corresponds to a scalar product between tensors.

    It is worth pointing out that this definition of operator* between symmetric tensors is different to how the (in general non-symmetric) Tensor class defines operator*, namely as the single-contraction product over the last index of the first operand and the first index of the second operand. For the double contraction of Tensor objects, you will need to use the double_contract() function.

    To maintain at least a modicum of resemblance between the interfaces of Tensor and SymmetricTensor, there are also global functions double_contract() for symmetric tensors that then do the same work as this operator. However, rather than returning the result as a return value, they write it into the first argument to the function in the same way as the corresponding functions for the Tensor class do things.
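A compact sketch of both cases (entry values chosen arbitrarily for illustration):

  #include <deal.II/base/symmetric_tensor.h>

  void demo_double_contraction()
  {
    dealii::SymmetricTensor<2, 3> A, B;
    A[0][0] = 1.;  A[0][1] = 2.;   // A[1][0] refers to the same stored entry
    B[0][0] = 3.;  B[0][1] = 4.;

    const double s = A * B;        // scalar: A : B = sum_ij A_ij B_ij

    const dealii::SymmetricTensor<4, 3> S = dealii::identity_tensor<3>();
    const dealii::SymmetricTensor<2, 3> C = S * A;  // rank-2 result; C == A
    (void)s;
    (void)C;
  }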

    @@ -1254,7 +1254,7 @@
The opposite of the previous function: given an index $i$ in the unrolled form of the tensor, return what set of indices $(k,l)$ (for rank-2 tensors) or $(k,l,m,n)$ (for rank-4 tensors) corresponds to it.

    @@ -1474,7 +1474,7 @@
Return the fourth-order symmetric identity tensor $\mathbb S$ which maps symmetric second-order tensors, such as $\mathbf A$, to themselves.

\[
  \mathbb S : \mathbf A = \mathbf A
\]

@@ -1874,7 +1874,7 @@

Compute the second invariant of a tensor of rank 2. The second invariant of a tensor $\mathbf A$ is defined as $I_2 (\mathbf A) = II(\mathbf A) = \frac 12 \left[ (\text{tr} \mathbf A)^2 - \text{tr} (\mathbf{A}^2) \right]$.

    For the kind of arguments to this function, i.e., a rank-2 tensor of size 1, the result is simply zero.

@@ -1906,11 +1906,11 @@

Compute the second invariant of a tensor of rank 2. The second invariant of a tensor $\mathbf A$ is defined as $I_2 (\mathbf A) = II(\mathbf A) = \frac 12 \left[ (\text{tr} \mathbf A)^2 - \text{tr} (\mathbf{A}^2) \right]$.

For the kind of arguments to this function, i.e., a symmetric rank-2 tensor of size 2, the result is (counting indices starting at one) $I_2(\mathbf A) = II(\mathbf A) = \frac 12 \left[ (A_{11} + A_{22})^2 - (A_{11}^2 + 2 A_{12}^2 + A_{22}^2) \right] = A_{11} A_{22} - A_{12}^2$. As expected, for the $2\times 2$ symmetric tensors this function handles, this equals the determinant of the tensor. (This is so because for $2\times 2$ symmetric tensors, there really are only two invariants, so the second and third invariant are the same; the determinant is the third invariant.)

    Definition at line 2841 of file symmetric_tensor.h.

    @@ -1940,7 +1940,7 @@
Compute the second invariant of a tensor of rank 2. The second invariant of a tensor $\mathbf A$ is defined as $I_2 (\mathbf A) = II(\mathbf A) = \frac 12 \left[ (\text{tr} \mathbf A)^2 - \text{tr} (\mathbf{A}^2) \right]$.

    Definition at line 2858 of file symmetric_tensor.h.

    @@ -1999,8 +1999,8 @@
Return the eigenvalues of a symmetric $2\times 2$ tensor. The array of eigenvalues is sorted in descending order.

For $2\times 2$ tensors, the eigenvalues of tensor $\mathbf T$ are the roots of the characteristic polynomial $0 = \lambda^2 - \lambda\;\text{tr}\mathbf{T} + \det \mathbf{T}$ as given by $\lambda_1, \lambda_2 = \frac{1}{2} \left[ \text{tr} \mathbf{T} \pm \sqrt{(\text{tr} \mathbf{T})^2 - 4 \det \mathbf{T}} \right]$.

    Warning
    The algorithm employed here determines the eigenvalues by computing the roots of the characteristic polynomial. In the case that there exists a common root (the eigenvalues are equal), the computation is subject to round-off errors of order $\sqrt{\epsilon}$. As an alternative, the eigenvectors() function provides a more robust, but costly, method to compute the eigenvalues of a symmetric tensor.
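A sketch (arbitrary entries) of both routes mentioned in the warning:

  #include <deal.II/base/symmetric_tensor.h>
  #include <array>

  void demo_eigenvalues()
  {
    dealii::SymmetricTensor<2, 2> T;
    T[0][0] = 2.;  T[1][1] = 1.;  T[0][1] = 0.5;

    // Roots of the characteristic polynomial, sorted in descending order.
    const std::array<double, 2> lambda = dealii::eigenvalues(T);

    // More robust (but more expensive) alternative: full eigenpairs.
    const auto eigenpairs = dealii::eigenvectors(T);
    (void)lambda;
    (void)eigenpairs;
  }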
    @@ -2602,7 +2602,7 @@
Compute the scalar product $\mathbf A: \mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. In the current case where both arguments are symmetric tensors, this is equivalent to calling the expression A*B which uses SymmetricTensor::operator*().

    Definition at line 3735 of file symmetric_tensor.h.

    @@ -2632,7 +2632,7 @@
Compute the scalar product $\mathbf A: \mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. We don't use operator* for this operation since the product between two tensors is usually assumed to be the contraction over the last index of the first tensor and the first index of the second tensor. For example, if B is a Tensor, calling A*B (instead of scalar_product(A,B)) provides $(\mathbf A \cdot\mathbf B)_{ij}=\sum_k A_{ik}B_{kj}$.

    Definition at line 3757 of file symmetric_tensor.h.

    @@ -2662,7 +2662,7 @@
Compute the scalar product $\mathbf A:\mathbf B=\sum_{i,j} A_{ij}B_{ij}$ between two tensors $\mathbf A, \mathbf B$ of rank 2. We don't use operator* for this operation since the product between two tensors is usually assumed to be the contraction over the last index of the first tensor and the first index of the second tensor. For example, if A is a Tensor, calling A*B (instead of scalar_product(A,B)) provides $(\mathbf A \cdot\mathbf B)_{ij}=\sum_k A_{ik}B_{kj}$.

    Definition at line 3784 of file symmetric_tensor.h.

    @@ -2968,13 +2968,13 @@
The dot product (single contraction) for tensors: Return a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

\[
  \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
  = \sum_{k}
    \text{left}_{i_1,\ldots,i_{r1}, k}
    \text{right}_{k, j_1,\ldots,j_{r2}}
\]

    Note
    As one operand is a Tensor, the multiplication operator only performs a contraction over a single pair of indices. This is in contrast to the multiplication operator for SymmetricTensor, which does the double contraction.
    @@ -3006,13 +3006,13 @@
The dot product (single contraction) for tensors: Return a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

\[
  \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
  = \sum_{k}
    \text{left}_{i_1,\ldots,i_{r1}, k}
    \text{right}_{k, j_1,\ldots,j_{r2}}
\]

    Note
    As one operand is a Tensor, the multiplication operator only performs a contraction over a single pair of indices. This is in contrast to the multiplication operator for SymmetricTensor, which does the double contraction.
    @@ -3159,7 +3159,7 @@ Initial value:

An integer denoting the number of independent components that fully describe a symmetric tensor. In $d$ space dimensions, this number equals $\frac 12 (d^2+d)$ for symmetric tensors of rank 2.

    Definition at line 743 of file symmetric_tensor.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html 2024-11-15 06:44:22.531609964 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTableBase.html 2024-11-15 06:44:22.531609964 +0000 @@ -245,7 +245,7 @@

    In some way, this class is similar to the Tensor class, in that it templatizes on the number of dimensions. However, there are two major differences. The first is that the Tensor class stores only numeric values (as doubles), while the Table class stores arbitrary objects. The second is that the Tensor class has fixed sizes in each dimension, also given as a template argument, while this class can handle arbitrary and different sizes in each dimension.

This has two consequences. First, since the size is not known at compile time, it has to do explicit memory allocation. Second, the layout of individual elements is not known at compile time, so access is slower than for the Tensor class, where the number of elements and their location are known at compile time and the compiler can optimize with this knowledge (for example when unrolling loops). On the other hand, this class is of course more flexible, for example when you want a two-dimensional table with the number of rows equal to the number of degrees of freedom on a cell, and the number of columns equal to the number of quadrature points. Both numbers may only be known at run-time, so a flexible table is needed here. Furthermore, you may want to store, say, the gradients of shape functions, so the data type is not a single scalar value, but a tensor itself.

    Dealing with large data sets

The Table classes (derived from this class) are frequently used to store large data tables. A modest example is given in step-53 where we store a $380 \times 220$ table of geographic elevation data for a region of Africa, and this data requires about 670 kB of memory; however, tables that store three- or more-dimensional data (say, information about the density, pressure, and temperature in the earth interior on a regular grid of (latitude, longitude, depth) points) can easily run into hundreds of megabytes or more. These tables are then often provided to classes such as InterpolatedTensorProductGridData or InterpolatedUniformGridData.
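A sketch of the run-time-sized use case from the paragraph above (hypothetical sizes; real code would fill the table from actual data):

  #include <deal.II/base/table.h>

  dealii::Table<2, double> make_table(const unsigned int n_rows,
                                      const unsigned int n_cols)
  {
    dealii::Table<2, double> elevation(n_rows, n_cols);
    for (unsigned int i = 0; i < n_rows; ++i)
      for (unsigned int j = 0; j < n_cols; ++j)
        elevation(i, j) = 0.;   // placeholder for real elevation data
    return elevation;
  }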

    If you need to load such tables on single-processor (or multi-threaded) jobs, then there is nothing you can do about the size of these tables: The table just has to fit into memory. But, if your program is parallelized via MPI, then a typical first implementation would create a table object on every process and fill it on every MPI process by reading the data from a file. This is inefficient from two perspectives:

    @@ -1227,7 +1227,7 @@
Return an unrolled index in the range $[0,\text{dim}^{\text{rank}}-1]$ for the element of the tensor indexed by the argument to the function.

    @@ -1255,7 +1255,7 @@
Opposite of component_to_unrolled_index: For an index in the range $[0, \text{dim}^{\text{rank}}-1]$, return which set of indices it would correspond to.

    @@ -1891,11 +1891,11 @@

    Entrywise multiplication of two tensor objects of general rank.

    This multiplication is also called "Hadamard-product" (c.f. https://en.wikipedia.org/wiki/Hadamard_product_(matrices)), and generates a new tensor of size <rank, dim>:

\[
  \text{result}_{i, j}
  = \text{left}_{i, j}\circ
    \text{right}_{i, j}
\]
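As a sketch, the operation as exposed by the free function schur_product() in tensor.h (the name is assumed from the library's public interface):

  #include <deal.II/base/tensor.h>

  using namespace dealii;

  Tensor<2, 2> left, right;          // all entries zero-initialized
  left[0][0] = 1.;  left[0][1] = 2.;
  right[0][0] = 3.; right[0][1] = 4.;

  // result[i][j] = left[i][j] * right[i][j], entry by entry
  const Tensor<2, 2> result = schur_product(left, right); // result[0][1] == 8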

    Template Parameters
The dot product (single contraction) for tensors. This function returns a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

\[
  \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
  = \sum_{k}
    \text{left}_{i_1,\ldots,i_{r1}, k}
    \text{right}_{k, j_1,\ldots,j_{r2}}
\]

    Note
For the Tensor class, the multiplication operator only performs a contraction over a single pair of indices. This is in contrast to the multiplication operator for SymmetricTensor, for which the corresponding operator*() performs a double contraction. The origin of the difference in how operator*() is implemented between Tensor and SymmetricTensor is that for the former, the product between two Tensor objects of same rank and dimension results in another Tensor object – that is, operator*() corresponds to the multiplicative group action within the group of tensors. On the other hand, there is no corresponding multiplicative group action with the set of symmetric tensors because, in general, the product of two symmetric tensors is a nonsymmetric tensor. As a consequence, for a mathematician, it is clear that operator*() for symmetric tensors must have a different meaning: namely the dot or scalar product that maps two symmetric tensors of rank 2 to a scalar. This corresponds to the double-dot (colon) operator whose meaning is then extended to the product of any two even-ranked symmetric tensors.
In case the contraction yields a tensor of rank 0, that is, if rank_1==rank_2==1, then a scalar number is returned as an unwrapped number type.

Return the $l_1$ norm of the given rank-2 tensor, where $\|\mathbf T\|_1 = \max_j \sum_i |T_{ij}|$ (maximum of the sums over columns).

    Definition at line 3039 of file tensor.h.
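A minimal sketch of the single contraction and of the scalar special case:

  #include <deal.II/base/tensor.h>

  using namespace dealii;

  Tensor<2, 3> A; // rank 2
  Tensor<1, 3> x; // rank 1

  // rank 2 + rank 1 - 2 = rank 1: the usual matrix-vector product
  const Tensor<1, 3> y = A * x;

  // rank 1 + rank 1 - 2 = rank 0: returned as an unwrapped scalar
  const double s = x * x;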

Return the $l_\infty$ norm of the given rank-2 tensor, where $\|\mathbf T\|_\infty = \max_i \sum_j |T_{ij}|$ (maximum of the sums over rows).

    Definition at line 3065 of file tensor.h.
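Both matrix norms are available as free functions for rank-2 tensors; a sketch:

  #include <deal.II/base/tensor.h>

  using namespace dealii;

  Tensor<2, 2> T;
  T[0][0] = 1.; T[0][1] = -2.;
  T[1][0] = 3.; T[1][1] = 4.;

  const double n1   = l1_norm(T);     // max column sum: max(1+3, 2+4) = 6
  const double ninf = linfty_norm(T); // max row sum:    max(1+2, 3+4) = 7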

    /usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html 2024-11-15 06:44:22.643610964 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductManifold.html 2024-11-15 06:44:22.643610964 +0000 @@ -233,7 +233,7 @@

    Detailed Description

    template<int dim, int dim_A, int spacedim_A, int chartdim_A, int dim_B, int spacedim_B, int chartdim_B>
    class TensorProductManifold< dim, dim_A, spacedim_A, chartdim_A, dim_B, spacedim_B, chartdim_B >

    Tensor product manifold of two ChartManifolds.

This manifold will combine the ChartManifolds A and B given in the constructor to form a new ChartManifold by building the tensor product $A\otimes B$. The first spacedim_A dimensions in the real space and the first chartdim_A dimensions of the chart will be given by manifold A, while the remaining coordinates are given by B. The manifold is to be used by a Triangulation<dim, space_dim_A+space_dim_B>.

    An example usage would be the combination of a SphericalManifold with space dimension 2 and a FlatManifold with space dimension 1 to form a cylindrical manifold.

    pull_back(), push_forward(), and push_forward_gradient() are implemented by splitting the input argument into inputs for A and B according to the given dimensions and applying the corresponding operations before concatenating the result.

    Note
    The dimension arguments dim_A and dim_B are not used.
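A sketch of the cylinder-like combination mentioned above; PolarManifold stands in for the spherical description since both factors must be ChartManifolds, and the exact template arguments should be checked against the installed version:

  #include <deal.II/grid/manifold.h>               // FlatManifold
  #include <deal.II/grid/manifold_lib.h>           // PolarManifold
  #include <deal.II/grid/tensor_product_manifold.h>

  using namespace dealii;

  // circle (2d polar chart) x line (1d flat chart) -> cylindrical manifold
  const PolarManifold<2, 2> circle; // dim_A = spacedim_A = chartdim_A = 2
  const FlatManifold<1, 1>  axis;   // dim_B = spacedim_B = chartdim_B = 1

  const TensorProductManifold<3, 2, 2, 2, 1, 1, 1> cylinder(circle, axis);
  // attach to a Triangulation<3> via tria.set_manifold(manifold_id, cylinder)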
Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
  \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
\\         &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                           -F^{-1}(\mathbf x_1)\right]
\end{align*}

In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
  \mathbf s(t) &= F(\zeta(t))
\\             &= F(\xi_1 +  t (\xi_2-\xi_1))
\\             &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                                      -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
  \mathbf s'(0) &= \frac{d}{dt}\left.F\left(F^{-1}(\mathbf x_1)
                                            + t\left[F^{-1}(\mathbf x_2)
                                                     -F^{-1}(\mathbf x_1)\right]\right)\right|_{t=0}
\\              &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                   \left[F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)\right].
\end{align*}

This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html 2024-11-15 06:44:22.667611179 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductMatrixSymmetricSum.html 2024-11-15 06:44:22.667611179 +0000 @@ -174,7 +174,7 @@ M_1 \otimes A_0 \end{align*}

in 3d. The typical application setting is a discretization of the Laplacian $L$ on a Cartesian (axis-aligned) geometry, where it can be exactly represented by the Kronecker or tensor product of a 1d mass matrix $M$ and a 1d Laplace matrix $A$ in each tensor direction (due to symmetry $M$ and $A$ are the same in each dimension). The dimension of the resulting class is the product of the one-dimensional matrices.

    This class implements two basic operations, namely the usual multiplication by a vector and the inverse. For both operations, fast tensorial techniques can be applied that implement the operator evaluation in $\text{size}(M)^{d+1}$ arithmetic operations, considerably less than $\text{size}(M)^{2d}$ for the naive forward transformation and $\text{size}(M)^{3d}$ for setting up the inverse of $L$.
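In code, the two operations look roughly as follows (a sketch; the FullMatrix-based reinit() overload and the ArrayView-based vmult()/apply_inverse() signatures are assumptions about the installed interface):

  #include <deal.II/lac/full_matrix.h>
  #include <deal.II/lac/tensor_product_matrix.h>
  #include <deal.II/lac/vector.h>

  using namespace dealii;

  const unsigned int n = 5;            // size of the 1d matrices
  FullMatrix<double> M(n, n), A(n, n); // 1d mass and Laplace matrices
  // ... fill M and A ...

  TensorProductMatrixSymmetricSum<2, double> L;
  L.reinit(M, A); // same M and A in each direction, L = A(x)M + M(x)A in 2d

  Vector<double> src(n * n), dst(n * n);
  L.vmult(make_array_view(dst), make_array_view(src));         // multiplication
  L.apply_inverse(make_array_view(dst), make_array_view(src)); // fast inverse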

Interestingly, the exact inverse of the matrix $L$ can be found through tensor products due to an observation by Lynch et al. from 1964 [Lynch1964],

    \begin{align*}
/usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductPolynomialsBubbles.html differs (HTML document, ASCII text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductPolynomialsBubbles.html	2024-11-15 06:44:22.691611393 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTensorProductPolynomialsBubbles.html	2024-11-15 06:44:22.695611429 +0000

    Detailed Description

    template<int dim>
class TensorProductPolynomialsBubbles< dim >

A class that represents a space of tensor product polynomials, augmented by $dim$ (non-normalized) bubble functions of form $\varphi_j(\mathbf x) = 2^{\text{degree}-1}\left(x_j-\frac 12\right)^{\text{degree}-1} \left[\prod_{i=0}^{dim-1}(x_i(1-x_i))\right]$ for $j=0,\ldots,dim-1$. If degree is one, then the first factor disappears and one receives the usual bubble function centered at the mid-point of the cell.

    This class inherits most of its functionality from TensorProductPolynomials. The bubble enrichments are added for the last index.

    Definition at line 52 of file tensor_product_polynomials_bubbles.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html 2024-11-15 06:44:22.727611715 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTorusManifold.html 2024-11-15 06:44:22.731611750 +0000 @@ -236,7 +236,7 @@

    Detailed Description

    template<int dim>
class TorusManifold< dim >

Manifold description for the surface of a Torus in three dimensions. The Torus is assumed to be in the x-z plane. The reference coordinate system is given by the angle $\phi$ around the y axis, the angle $\theta$ around the centerline of the torus, and the distance to the centerline $w$ (between 0 and 1).

    This class was developed to be used in conjunction with GridGenerator::torus.

    Definition at line 861 of file manifold_lib.h.

Given a point in the chartdim dimensional Euclidean space, this method returns the derivatives of the function $F$ that maps from the chartdim-dimensional to the spacedim-dimensional space. In other words, it is a matrix of size $\text{spacedim}\times\text{chartdim}$.

    This function is used in the computations required by the get_tangent_vector() function. Since not all users of the Manifold class interface will require calling that function, the current function is implemented but will trigger an exception whenever called. This allows derived classes to avoid implementing the push_forward_gradient function if this functionality is not needed in the user program.

    Refer to the general documentation of this class for more information.

Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. See the documentation of the Manifold class and of Manifold::get_tangent_vector() for a more detailed description.

For the current class, we assume that this geodesic is the image under the push_forward() operation of a straight line of the pre-images of x1 and x2 (where pre-images are computed by pulling back the locations x1 and x2). In other words, if these preimages are $\xi_1=F^{-1}(\mathbf x_1), \xi_2=F^{-1}(\mathbf x_2)$, then the geodesic in preimage (the chartdim-dimensional Euclidean) space is

\begin{align*}
  \zeta(t) &= \xi_1 +  t (\xi_2-\xi_1)
\\         &= F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                           -F^{-1}(\mathbf x_1)\right]
\end{align*}

In image space, i.e., in the space in which we operate, this leads to the curve

\begin{align*}
  \mathbf s(t) &= F(\zeta(t))
\\             &= F(\xi_1 +  t (\xi_2-\xi_1))
\\             &= F\left(F^{-1}(\mathbf x_1) + t\left[F^{-1}(\mathbf x_2)
                                                      -F^{-1}(\mathbf x_1)\right]\right).
\end{align*}

What the current function is supposed to return is $\mathbf s'(0)$. By the chain rule, this is equal to

\begin{align*}
  \mathbf s'(0) &= \frac{d}{dt}\left.F\left(F^{-1}(\mathbf x_1)
                                            + t\left[F^{-1}(\mathbf x_2)
                                                     -F^{-1}(\mathbf x_1)\right]\right)\right|_{t=0}
\\              &= \nabla_\xi F\left(F^{-1}(\mathbf x_1)\right)
                   \left[F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)\right].
\end{align*}

This formula may then have to be slightly modified by considering any periodicity that was assumed in the call to the constructor.

Thus, the computation of tangent vectors also requires the implementation of derivatives $\nabla_\xi F(\xi)$ of the push-forward mapping. Here, $F^{-1}(\mathbf x_2)-F^{-1}(\mathbf x_1)$ is a chartdim-dimensional vector, and $\nabla_\xi F\left(F^{-1}(\mathbf x_1)\right) = \nabla_\xi F\left(\xi_1\right)$ is a spacedim-times-chartdim-dimensional matrix. Consequently, and as desired, the operation results in a spacedim-dimensional vector.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html 2024-11-15 06:44:22.771612108 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTransfiniteInterpolationManifold.html 2024-11-15 06:44:22.771612108 +0000 @@ -221,12 +221,12 @@
    template<int dim, int spacedim = dim>
    class TransfiniteInterpolationManifold< dim, spacedim >

    A mapping class that extends curved boundary descriptions into the interior of the computational domain. The outer curved boundary description is assumed to be given by another manifold (e.g. a polar manifold on a circle). The mechanism to extend the boundary information is a so-called transfinite interpolation. The use of this class is discussed extensively in step-65.

    The formula for extending such a description in 2d is, for example, described on Wikipedia. Given a point $(u,v)$ on the chart, the image of this point in real space is given by

\begin{align*}
  \mathbf S(u,v) &= (1-v)\mathbf c_0(u)+v \mathbf c_1(u) + (1-u)\mathbf c_2(v)
                    + u \mathbf c_3(v) \\
                 &\quad - \left[(1-u)(1-v) \mathbf x_0 + u(1-v) \mathbf x_1
                                + (1-u)v \mathbf x_2 + uv \mathbf x_3 \right]
\end{align*}

where $\bf x_0, \bf x_1, \bf x_2, \bf x_3$ denote the four vertices bounding the image space and $\bf c_0, \bf c_1, \bf c_2, \bf c_3$ are the four curves describing the lines of the cell. If a curved manifold is attached to any of these lines, the evaluation is done according to Manifold::get_new_point() with the two end points of the line and appropriate weight. In 3d, the generalization of this formula is implemented, creating a weighted sum of the vertices (positive contribution), the lines (negative), and the faces (positive contribution).

    This manifold is usually attached to a coarse mesh and then places new points as a combination of the descriptions on the boundaries, weighted appropriately according to the position of the point in the original chart coordinates $(u,v)$. This manifold should be preferred over setting only a curved manifold on the boundary of a mesh in most situations as it yields more uniform mesh distributions as the mesh is refined because it switches from a curved description to a straight description over all children of the initial coarse cell this manifold was attached to. This way, the curved nature of the manifold that is originally contained in one coarse mesh layer will be applied to more than one fine mesh layer once the mesh gets refined. Note that the mechanisms of TransfiniteInterpolationManifold are also built into the MappingQ class when only a surface of a cell is subject to a curved description, ensuring that even the default case without this manifold gets optimal convergence rates when applying curved boundary descriptions.

Return a vector that, at $\mathbf x_1$, is tangential to the geodesic that connects two points $\mathbf x_1,\mathbf x_2$. The geodesic is the shortest line between these two points, where "shortest" is defined via a metric specific to a particular implementation of this class in a derived class. For example, in the case of a FlatManifold, the shortest line between two points is just the straight line, and in this case the tangent vector is just the difference $\mathbf d=\mathbf x_2-\mathbf x_1$. On the other hand, for a manifold that describes a surface embedded in a higher dimensional space (e.g., the surface of a sphere), the tangent vector is tangential to the surface, and consequently may point in a different direction than the straight line that connects the two points.

While tangent vectors are often normalized to unit length, the vectors returned by this function are normalized as described in the introduction of this class. Specifically, if $\mathbf s(t)$ traces out the geodesic between the two points where $\mathbf x_1 = \mathbf s(0)$ and $\mathbf x_2 = \mathbf s(1)$, then the returned vector must equal $\mathbf s'(0)$. In other words, the norm of the returned vector also encodes, in some sense, the length of the geodesic because a curve $\mathbf s(t)$ must move "faster" if the two points it connects between arguments $t=0$ and $t=1$ are farther apart.

The default implementation of this function approximates $\mathbf s'(0) \approx \frac{\mathbf s(\epsilon)-\mathbf x_1}{\epsilon}$ for a small value of $\epsilon$, and the evaluation of $\mathbf s(\epsilon)$ is done by calling get_new_point(). If possible, derived classes should override this function by an implementation of the exact derivative.

Parameters

x1: The first point that describes the geodesic, and the one at which the "direction" is to be evaluated.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html 2024-11-15 06:44:22.835612679 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor.html 2024-11-15 06:44:22.835612679 +0000 @@ -338,7 +338,7 @@

    Detailed Description

    template<int structdim, int dim, int spacedim>
class TriaAccessor< structdim, dim, spacedim >

A class that provides access to objects in a triangulation such as its vertices, sub-objects, children, geometric information, etc. This class represents objects of dimension structdim (i.e. 1 for lines, 2 for quads, 3 for hexes) in a triangulation of dimensionality dim (i.e. 1 for a triangulation of lines, 2 for a triangulation of quads, and 3 for a triangulation of hexes) that is embedded in a space of dimensionality spacedim (for spacedim==dim the triangulation represents a domain in $R^{dim}$, for spacedim>dim the triangulation is of a manifold embedded in a higher dimensional space).

    There is a specialization of this class for the case where structdim equals zero, i.e., for vertices of a triangulation.

    Definition at line 756 of file tria_accessor.h.


    This function computes a fast approximate transformation from the real to the unit cell by inversion of an affine approximation of the $d$-linear function from the reference $d$-dimensional cell.

The affine approximation of the unit to real cell mapping is found by a least squares fit of an affine function to the $2^d$ vertices of the present object. For any valid mesh cell whose geometry is not degenerate, this operation results in a unique affine mapping. Thus, this function will return a finite result for all given input points, even in cases where the actual transformation by an actual bi-/trilinear or higher order mapping might be singular. Besides only approximating the mapping from the vertex points, this function also ignores the attached manifold descriptions. The result is only exact in case the transformation from the unit to the real cell is indeed affine, such as in one dimension or for Cartesian and affine (parallelogram) meshes in 2d/3d.

    For exact transformations to the unit cell, use Mapping::transform_real_to_unit_cell().

    Note
    If dim<spacedim we first project p onto the plane.
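A typical call, as a sketch (the surrounding function and variable names are hypothetical):

  #include <deal.II/grid/tria.h>

  using namespace dealii;

  Point<2> approximate_pull_back(const Triangulation<2>::cell_iterator &cell,
                                 const Point<2> &p)
  {
    // cheap affine approximation; exact alternative:
    // Mapping::transform_real_to_unit_cell()
    return cell->real_to_unit_cell_affine_approximation(p);
  }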
Return the barycenter (also called centroid) of the object. The barycenter for an object $K$ of dimension $d$ in $D$ space dimensions is given by the $D$-dimensional vector $\mathbf x_K$ defined by

\[
  \mathbf x_K = \frac{1}{|K|} \int_K \mathbf x \; \textrm{d}x
\]

where the measure of the object is given by

\[
  |K| = \int_K \mathbf 1 \; \textrm{d}x.
\]

    This function assumes that $K$ is mapped by a $d$-linear function from the reference $d$-dimensional cell. Then the integrals above can be pulled back to the reference cell and evaluated exactly (if through lengthy and, compared to the center() function, expensive computations).

/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html 2024-11-15 06:44:22.879613072 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_011_00_01spacedim_01_4.html 2024-11-15 06:44:22.879613072 +0000 @@ -291,7 +291,7 @@ static unsigned int quad_index (const unsigned int i)

    Detailed Description

    template<int spacedim>
class TriaAccessor< 0, 1, spacedim >

This class is a specialization of TriaAccessor<structdim, dim, spacedim> for the case that structdim is zero and dim is one. This class represents vertices in a one-dimensional triangulation that is embedded in a space of dimensionality spacedim (for spacedim==dim==1 the triangulation represents a domain in ${\mathbb R}^\text{dim}$, for spacedim>dim==1 the triangulation is of a manifold embedded in a higher dimensional space).

    The current specialization of the TriaAccessor<0,dim,spacedim> class for vertices of a one-dimensional triangulation exists since in the dim == 1 case vertices are also faces.

    Definition at line 2319 of file tria_accessor.h.

/usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html 2024-11-15 06:44:22.911613358 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTriaAccessor_3_010_00_01dim_00_01spacedim_01_4.html 2024-11-15 06:44:22.911613358 +0000 @@ -242,7 +242,7 @@ static unsigned int quad_index (const unsigned int i)

    Detailed Description

    template<int dim, int spacedim>
class TriaAccessor< 0, dim, spacedim >

This class is a specialization of TriaAccessor<structdim, dim, spacedim> for the case that structdim is zero. This class represents vertices in a triangulation of dimensionality dim (i.e. 1 for a triangulation of lines, 2 for a triangulation of quads, and 3 for a triangulation of hexes) that is embedded in a space of dimensionality spacedim (for spacedim==dim the triangulation represents a domain in ${\mathbb R}^\text{dim}$, for spacedim>dim the triangulation is of a manifold embedded in a higher dimensional space).

    There is a further specialization of this class for the case that dim equals one, i.e., for vertices of a one-dimensional triangulation, since in that case vertices are also faces.

    Definition at line 1907 of file tria_accessor.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html 2024-11-15 06:44:23.051614609 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTriangulation.html 2024-11-15 06:44:23.051614609 +0000 @@ -1943,7 +1943,7 @@
Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

    The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

    Note
    This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
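For example (sketch), three sweeps over a unit square turn one coarse cell into $(2^2)^3=64$ cells:

  #include <deal.II/grid/grid_generator.h>
  #include <deal.II/grid/tria.h>

  using namespace dealii;

  Triangulation<2> tria;
  GridGenerator::hyper_cube(tria); // one coarse cell
  tria.refine_global(3);           // 1 -> 4 -> 16 -> 64 active cells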
Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e., for $dim>2$, then no level argument may be given.

    Note
    The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html 2024-11-15 06:44:23.119615216 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1BlockSparseMatrix.html 2024-11-15 06:44:23.119615216 +0000 @@ -1016,7 +1016,7 @@
Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix. The vector types can be block vectors or non-block vectors (only if the matrix has only one row or column, respectively), and need to define TrilinosWrappers::SparseMatrix::vmult.

    Definition at line 443 of file trilinos_block_sparse_matrix.h.

Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Definition at line 456 of file trilinos_block_sparse_matrix.h.

Adding matrix-vector multiplication. Add $M*src$ to $dst$ with $M$ being this matrix.

Compute the matrix scalar product $\left(u,Mv\right)$.

Matrix-vector multiplication: let $dst = M*src$ with $M$ being this matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

Matrix-vector multiplication: let $dst = M^T*src$ with $M$ being this matrix. This function does the same as vmult() but takes the transposed matrix.

    Due to problems with deriving template arguments between the block and non-block versions of the vmult/Tvmult functions, the actual functions are implemented in derived classes, with implementations forwarding the calls to the implementations provided here under a unique name for which template arguments can be derived by the compiler.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html 2024-11-15 06:44:23.175615716 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1BlockVector.html 2024-11-15 06:44:23.175615716 +0000 @@ -1765,7 +1765,7 @@
Return the square of the $l_2$-norm.

Return the $l_1$-norm of the vector, i.e. the sum of the absolute values.

Return the $l_2$-norm of the vector, i.e. the square root of the sum of the squares of the elements.

Return the maximum absolute value of the elements of this vector, which is the $l_\infty$-norm of a vector.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html 2024-11-15 06:44:23.235616252 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1MPI_1_1Vector.html 2024-11-15 06:44:23.235616252 +0000 @@ -1323,7 +1323,7 @@
Return the square of the $l_2$-norm.

$l_1$-norm of the vector. The sum of the absolute values.

$l_2$-norm of the vector. The square root of the sum of the squares of the elements.

$l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html 2024-11-15 06:44:23.259616467 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1NOXSolver.html 2024-11-15 06:44:23.263616502 +0000 @@ -319,7 +319,7 @@
A function object that users should supply and that is intended to compute the residual $F(u)$.

    Note
    This variable represents a user provided callback. See there for a description of how to deal with errors and other requirements and conventions. NOX can not deal with "recoverable" errors for this callback, so if it throws an exception of type RecoverableUserCallbackError, then this exception is treated like any other exception.

    Definition at line 204 of file nox.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html 2024-11-15 06:44:23.323617038 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparseMatrix.html 2024-11-15 06:44:23.323617038 +0000 @@ -2108,7 +2108,7 @@
Return the square of the norm of the vector $v$ with respect to the norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful, e.g. in the finite element context, where the $L_2$ norm of a function equals the matrix norm with respect to the mass matrix of the vector representing the nodal values of the finite element function.

    Obviously, the matrix needs to be quadratic for this operation.

    The implementation of this function is not as efficient as the one in the SparseMatrix class used in deal.II (i.e. the original one, not the Trilinos wrapper class) since Trilinos doesn't support this operation and needs a temporary vector.

    The vector has to be initialized with the same IndexSet the matrix was initialized with.


Compute the matrix scalar product $\left(u,Mv\right)$.

    The implementation of this function is not as efficient as the one in the SparseMatrix class used in deal.II (i.e. the original one, not the Trilinos wrapper class) since Trilinos doesn't support this operation and needs a temporary vector.

    The vector u has to be initialized with the same IndexSet that was used for the row indices of the matrix and the vector v has to be initialized with the same IndexSet that was used for the column indices of the matrix.

    In case of a localized Vector, this function will only work when running on one processor, since the matrix object is inherently distributed. Otherwise, an exception will be thrown.
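A sketch of both calls, with the matrix and vectors assumed to be initialized compatibly elsewhere:

  #include <deal.II/lac/trilinos_sparse_matrix.h>
  #include <deal.II/lac/trilinos_vector.h>

  using namespace dealii;

  double example(const TrilinosWrappers::SparseMatrix &M,
                 const TrilinosWrappers::MPI::Vector  &u,
                 const TrilinosWrappers::MPI::Vector  &v)
  {
    const double vMv = M.matrix_norm_square(v);       // (v, Mv)
    const double uMv = M.matrix_scalar_product(u, v); // (u, Mv)
    return vMv + uMv;
  }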

    /usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html 2024-11-15 06:44:23.371617467 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classTrilinosWrappers_1_1SparsityPattern.html 2024-11-15 06:44:23.371617467 +0000 @@ -467,7 +467,7 @@
Generate a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally, too.

It is possible to specify the number of column entries per row using the optional n_entries_per_row argument. However, this value does not need to be accurate or even given at all, since one does usually not have this kind of information before building the sparsity pattern (the usual case when the function DoFTools::make_sparsity_pattern() is called). The entries are allocated dynamically in a similar manner as for the deal.II DynamicSparsityPattern classes. However, a good estimate will reduce the setup time of the sparsity pattern.

    Definition at line 100 of file trilinos_sparsity_pattern.cc.

Generate a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally, too.

The vector n_entries_per_row specifies the number of entries in each row (information that is usually not available at this point, though).

    Definition at line 109 of file trilinos_sparsity_pattern.cc.

Initialize a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally.

The number of column entries per row is specified as the maximum number of entries argument. This does not need to be an accurate number since the entries are allocated dynamically in a similar manner as for the deal.II DynamicSparsityPattern classes, but a good estimate will reduce the setup time of the sparsity pattern.

    Definition at line 214 of file trilinos_sparsity_pattern.cc.

Initialize a sparsity pattern that is completely stored locally, having $m$ rows and $n$ columns. The resulting matrix will be completely stored locally.

    The vector n_entries_per_row specifies the number of entries in each row.

    Definition at line 227 of file trilinos_sparsity_pattern.cc.

Compute the bandwidth of the matrix represented by this structure. The bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ represents a nonzero entry of the matrix. Consequently, the maximum bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$.

    Definition at line 878 of file trilinos_sparsity_pattern.cc.

/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html 2024-11-15 06:44:23.411617824 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1Partitioner.html 2024-11-15 06:44:23.411617824 +0000 @@ -323,7 +323,7 @@ const MPI_Comm communicator

    Constructor that takes the number of locally-owned degrees of freedom local_size and the number of ghost degrees of freedom ghost_size.

The local index range is translated to global indices in an ascending and one-to-one fashion, i.e., the indices of process $p$ sit exactly between the indices of the processes $p-1$ and $p+1$, respectively.

    Note
Setting the ghost_size variable to an appropriate value provides memory space for the ghost data in a vector's memory allocation and allows access to it via local_element(). However, the associated global indices must be handled externally in this case.
/usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html 2024-11-15 06:44:23.435618038 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classUtilities_1_1MPI_1_1ProcessGrid.html 2024-11-15 06:44:23.435618038 +0000 @@ -238,7 +238,7 @@ const unsigned int column_block_size

    Constructor for a process grid for a given mpi_communicator. In this case the process grid is heuristically chosen based on the dimensions and block-cyclic distribution of a target matrix provided in n_rows_matrix, n_columns_matrix, row_block_size and column_block_size.

The maximum number of MPI cores one can utilize is $\min\{\frac{M}{MB}\frac{N}{NB}, Np\}$, where $M,N$ are the matrix dimensions, $MB,NB$ are the block sizes, and $Np$ is the number of processes in the mpi_communicator. This function then creates a 2d processor grid assuming the ratio between the number of process rows $p$ and columns $q$ to equal the ratio between the matrix dimensions $M$ and $N$.

For example, a square matrix $640\times 640$ with block size $32$ and an mpi_communicator with 11 cores will result in a $3\times 3$ process grid: since $M=N$, the grid is chosen square, and $3\times 3=9$ is the largest square grid that does not exceed the 11 available processes.

    Definition at line 208 of file process_grid.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html 2024-11-15 06:44:23.527618860 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classVector.html 2024-11-15 06:44:23.527618860 +0000 @@ -1345,7 +1345,7 @@
Return the square of the $l_2$-norm.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile). The algorithm uses pairwise summation with the same order of summation in every run, which gives fully repeatable results from one run to another.
$l_1$-norm of the vector. The sum of the absolute values.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile). The algorithm uses pairwise summation with the same order of summation in every run, which gives fully repeatable results from one run to another.
$l_2$-norm of the vector. The square root of the sum of the squares of the elements.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile). The algorithm uses pairwise summation with the same order of summation in every run, which gives fully repeatable results from one run to another.
$l_p$-norm of the vector. The pth root of the sum of the pth powers of the absolute values of the elements.

    Note
    If deal.II is configured with threads, this operation will run multi-threaded by splitting the work into smaller chunks (assuming there is enough work to make this worthwhile). The algorithm uses pairwise summation with the same order of summation in every run, which gives fully repeatable results from one run to another.
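The norms discussed above, side by side (sketch):

  #include <deal.II/lac/vector.h>

  using namespace dealii;

  Vector<double> v(3);
  v(0) = 1.; v(1) = -2.; v(2) = 2.;

  const double sq = v.norm_sqr();  // 1 + 4 + 4 = 9
  const double n1 = v.l1_norm();   // 1 + 2 + 2 = 5
  const double n2 = v.l2_norm();   // sqrt(9)   = 3
  const double np = v.lp_norm(3.); // (1 + 8 + 8)^(1/3)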
    /usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html 2024-11-15 06:44:23.583619360 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classhp_1_1FECollection.html 2024-11-15 06:44:23.583619360 +0000 @@ -1336,7 +1336,7 @@

    Return a block mask with as many elements as this object has blocks and of which exactly the one component is true that corresponds to the given argument. See the glossary for more information.

Note
This function will only succeed if the scalar referenced by the argument encompasses a complete block. In other words, if, for example, you pass an extractor for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single scalar object you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    This function is the equivalent of FiniteElement::component_mask() with the same arguments. It verifies that it gets the same result from every one of the elements that are stored in this FECollection. If this is not the case, it throws an exception.
    Parameters
    @@ -1432,7 +1432,7 @@

    Given a component mask (see this glossary entry ), produce a block mask (see this glossary entry ) that represents the blocks that correspond to the components selected in the input argument. This is essentially a conversion operator from ComponentMask to BlockMask.

Note
This function will only succeed if the components referenced by the argument encompass complete blocks. In other words, if, for example, you pass a component mask for the single $x$ velocity and this object represents an FE_RaviartThomas object, then the single component you selected is part of a larger block and consequently there is no block mask that would represent it. The function will then produce an exception.
    This function is the equivalent of FiniteElement::component_mask() with the same arguments. It verifies that it gets the same result from every one of the elements that are stored in this FECollection. If this is not the case, it throws an exception.
    Parameters
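For concreteness, a hedged sketch of the successful case: in the FESystem below each scalar component happens to form its own block, so selecting the pressure component yields a valid block mask. The element choice is illustrative only; with an FE_RaviartThomas velocity block, the same call for a single velocity component would throw, as described above.

#include <deal.II/fe/block_mask.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values_extractors.h>
#include <deal.II/hp/fe_collection.h>

int main()
{
  using namespace dealii;

  // 2d Taylor-Hood-like element: two velocity components and a pressure.
  FESystem<2>         fe(FE_Q<2>(2), 2, FE_Q<2>(1), 1);
  hp::FECollection<2> fe_collection(fe);

  // The pressure is component 2; it coincides with a complete block,
  // so this conversion succeeds.
  const FEValuesExtractors::Scalar pressure(2);
  const BlockMask mask = fe_collection.block_mask(pressure);
  return 0;
}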
    /usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html 2024-11-15 06:44:23.603619539 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classinternal_1_1MappingQImplementation_1_1InverseQuadraticApproximation.html 2024-11-15 06:44:23.603619539 +0000 @@ -179,7 +179,7 @@
    Parameters
real_support_points: The position of the mapping support points in real space, queried by MappingQ::compute_mapping_support_points().
unit_support_points: The location of the support points in reference coordinates $[0, 1]^d$ that map to the mapping support points in real space by a polynomial map.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html 2024-11-15 06:44:23.743620789 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1DistributedTriangulationBase.html 2024-11-15 06:44:23.747620825 +0000 @@ -1892,7 +1892,7 @@

    When vertices have been moved locally, for example using code like

    cell->vertex(0) = new_location;

    then this function can be used to update the location of vertices between MPI processes.

All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that the part of the information that has to be sent between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

    Note
    It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
    This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell is consistent with the corresponding location of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
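A minimal sketch of this pattern, assuming an existing parallel::distributed::Triangulation<2> named tria; the displacement and the ownership handling are illustrative only:

std::vector<bool> vertex_locally_moved(tria.n_vertices(), false);
for (const auto &cell : tria.active_cell_iterators())
  if (cell->is_locally_owned())
    for (const unsigned int v : cell->vertex_indices())
      if (vertex_locally_moved[cell->vertex_index(v)] == false)
        {
          // In real code, first make sure this process is the unique
          // owner of the vertex, e.g. via the lowest-subdomain-id rule
          // described above.
          cell->vertex(v)[1] += 0.01; // move the vertex
          vertex_locally_moved[cell->vertex_index(v)] = true;
        }
tria.communicate_locally_moved_vertices(vertex_locally_moved);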
    @@ -2459,7 +2459,7 @@
Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

    The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

    Note
    This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
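For example, a hedged minimal sketch of the cell-count arithmetic stated above, using a plain Triangulation for brevity:

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>

int main()
{
  dealii::Triangulation<2> tria;
  dealii::GridGenerator::hyper_cube(tria); // one coarse cell
  tria.refine_global(2);                   // (2^2)^2 = 16 active cells
  return 0;
}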
    @@ -7059,7 +7059,7 @@
Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e., for $dim>2$, no level argument must be given.

    Note
    The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
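The loop idiom this note describes might look like the following sketch, where tria is assumed to be an existing parallel::distributed::Triangulation:

for (unsigned int level = 0; level < tria.n_global_levels(); ++level)
  {
    // On a process whose local partition has no cells at this level,
    // begin(level) already equals end(), so the loop body never runs.
    for (auto cell = tria.begin(level);
         cell != tria.end() && cell->level() == static_cast<int>(level);
         ++cell)
      {
        // ... do something with cell ...
      }
  }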
    /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html 2024-11-15 06:44:23.891622111 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1TriangulationBase.html 2024-11-15 06:44:23.895622147 +0000 @@ -1801,7 +1801,7 @@

    When vertices have been moved locally, for example using code like

    cell->vertex(0) = new_location;

    then this function can be used to update the location of vertices between MPI processes.

All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that the part of the information that has to be sent between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

    Note
    It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
    This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell is consistent with the corresponding location of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
    @@ -2415,7 +2415,7 @@
Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

    The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

    Note
    This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
    @@ -7036,7 +7036,7 @@
Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e., for $dim>2$, no level argument must be given.

    Note
    The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html 2024-11-15 06:44:24.055623576 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation.html 2024-11-15 06:44:24.059623612 +0000 @@ -2114,7 +2114,7 @@
Return a permutation vector for the order the coarse cells are handed off to p4est. For example the value of the $i$th element in this vector is the index of the deal.II coarse cell (counting from begin(0)) that corresponds to the $i$th tree managed by p4est.

    Definition at line 3696 of file tria.cc.
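A hedged sketch of how this permutation might be inspected, assuming an existing parallel::distributed::Triangulation named tria:

const auto &permutation = tria.get_p4est_tree_to_coarse_cell_permutation();
for (unsigned int i = 0; i < permutation.size(); ++i)
  std::cout << "p4est tree " << i << " -> deal.II coarse cell "
            << permutation[i] << '\n';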

    @@ -2998,7 +2998,7 @@

    When vertices have been moved locally, for example using code like

    cell->vertex(0) = new_location;

    then this function can be used to update the location of vertices between MPI processes.

All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that the part of the information that has to be sent between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

    Note
    It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
    This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell is consistent with the corresponding location of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
    @@ -3457,7 +3457,7 @@
Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

    The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

    Note
    This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
    @@ -7791,7 +7791,7 @@
Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e., for $dim>2$, no level argument must be given.

    Note
    The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html 2024-11-15 06:44:24.211624970 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1distributed_1_1Triangulation_3_011_00_01spacedim_01_4.html 2024-11-15 06:44:24.211624970 +0000 @@ -2279,7 +2279,7 @@

    When vertices have been moved locally, for example using code like

    cell->vertex(0) = new_location;

    then this function can be used to update the location of vertices between MPI processes.

All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that the part of the information that has to be sent between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

    Note
    It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
    This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell is consistent with the corresponding location of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
    @@ -2850,7 +2850,7 @@
Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

    The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

    Note
    This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
    @@ -7337,7 +7337,7 @@
Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e., for $dim>2$, no level argument must be given.

    Note
    The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html 2024-11-15 06:44:24.367626363 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1fullydistributed_1_1Triangulation.html 2024-11-15 06:44:24.367626363 +0000 @@ -2427,7 +2427,7 @@

    When vertices have been moved locally, for example using code like

    cell->vertex(0) = new_location;

    then this function can be used to update the location of vertices between MPI processes.

All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that the part of the information that has to be sent between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

    Note
    It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
    This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell is consistent with the corresponding location of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
    @@ -2931,7 +2931,7 @@
Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

    The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

    Note
    This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
    @@ -7267,7 +7267,7 @@
Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e., for $dim>2$, no level argument must be given.

    Note
    The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
    /usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html 2024-11-15 06:44:24.519627720 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/classparallel_1_1shared_1_1Triangulation.html 2024-11-15 06:44:24.523627756 +0000 @@ -2214,7 +2214,7 @@

    When vertices have been moved locally, for example using code like

    cell->vertex(0) = new_location;

    then this function can be used to update the location of vertices between MPI processes.

All the vertices that have been moved and might be in the ghost layer of a process have to be reported in the vertex_locally_moved argument. This ensures that the part of the information that has to be sent between processes is actually sent. Additionally, it is quite important that vertices on the boundary between processes are reported on exactly one process (e.g. the one with the highest id). Otherwise we could expect undesirable results if multiple processes move a vertex differently. A typical strategy is to let processor $i$ move those vertices that are adjacent to cells whose owners include processor $i$ but no other processor $j$ with $j<i$; in other words, for vertices at the boundary of a subdomain, the processor with the lowest subdomain id "owns" a vertex.

    Note
    It only makes sense to move vertices that are either located on locally owned cells or on cells in the ghost layer. This is because you can be sure that these vertices indeed exist on the finest mesh aggregated over all processors, whereas vertices on artificial cells but not at least in the ghost layer may or may not exist on the globally finest mesh. Consequently, the vertex_locally_moved argument may not contain vertices that aren't at least on ghost cells.
    This function moves vertices in such a way that on every processor, the vertices of every locally owned and ghost cell is consistent with the corresponding location of these cells on other processors. On the other hand, the locations of artificial cells will in general be wrong since artificial cells may or may not exist on other processors and consequently it is not possible to determine their location in any way. This is not usually a problem since one never does anything on artificial cells. However, it may lead to problems if the mesh with moved vertices is refined in a later step. If that's what you want to do, the right way to do it is to save the offset applied to every vertex, call this function, and before refining or coarsening the mesh apply the opposite offset and call this function again.
    @@ -2702,7 +2702,7 @@
Refine all cells times times. In other words, in each one of the times iterations, loop over all cells and refine each cell uniformly into $2^\text{dim}$ children. In practice, this function repeats the following operations times times: call set_all_refine_flags() followed by execute_coarsening_and_refinement(). The end result is that the number of cells increases by a factor of $(2^\text{dim})^\text{times}=2^{\text{dim} \times \text{times}}$.

    The execute_coarsening_and_refinement() function called in this loop may throw an exception if it creates cells that are distorted (see its documentation for an explanation). This exception will be propagated through this function if that happens, and you may not get the actual number of refinement steps in that case.

    Note
    This function triggers the pre- and post-refinement signals before and after doing each individual refinement cycle (i.e. more than once if times > 1) . See the section on signals in the general documentation of this class.
    @@ -7237,7 +7237,7 @@
Iterator to the first quad, used or not, on the given level. If a level has no quads, a past-the-end iterator is returned. If quads are not cells, i.e., for $dim>2$, no level argument must be given.

    Note
    The given level argument needs to correspond to a level of the triangulation, i.e., should be less than the value returned by n_levels(). On the other hand, for parallel computations using a parallel::distributed::Triangulation object, it is often convenient to write loops over the cells of all levels of the global mesh, even if the local portion of the triangulation does not actually have cells at one of the higher levels. In those cases, the level argument is accepted if it is less than what the n_global_levels() function returns. If the given level is between the values returned by n_levels() and n_global_levels(), then no cells exist in the local portion of the triangulation at this level, and the function simply returns what end() would return.
    /usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html 2024-11-15 06:44:24.551628006 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/deprecated.html 2024-11-15 06:44:24.551628006 +0000 @@ -147,9 +147,9 @@
    Member DoFTools::map_dofs_to_support_points (const hp::MappingCollection< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof_handler, std::map< types::global_dof_index, Point< spacedim > > &support_points, const ComponentMask &mask={})
    Use the function that returns the std::map instead.
    Member FEEvaluationData< dim, Number, is_face >::get_normal_vector (const unsigned int q_point) const
Use normal_vector() instead.
    Member FEFaceEvaluation< dim, fe_degree, n_q_points_1d, n_components_, Number, VectorizedArrayType >::integrate_scatter (const bool integrate_values, const bool integrate_gradients, VectorType &output_vector)
Please use the integrate_scatter() function with the EvaluationFlags argument.
    Member FEInterfaceViews::Vector< dim, spacedim >::average_hessian (const unsigned int interface_dof_index, const unsigned int q_point) const
    Use the average_of_hessians() function instead.
    Member FEInterfaceViews::Vector< dim, spacedim >::jump_gradient (const unsigned int interface_dof_index, const unsigned int q_point) const
    @@ -157,7 +157,7 @@
    Member FEInterfaceViews::Vector< dim, spacedim >::jump_hessian (const unsigned int interface_dof_index, const unsigned int q_point) const
    Use the average_of_hessians() function instead.
    Member FEPointEvaluationBase< n_components_, dim, spacedim, Number >::real_point (const unsigned int point_index) const
Use the function quadrature_point() instead.
    Member FETools::Compositing::compute_nonzero_components (const FiniteElement< dim, spacedim > *fe1, const unsigned int N1, const FiniteElement< dim, spacedim > *fe2=nullptr, const unsigned int N2=0, const FiniteElement< dim, spacedim > *fe3=nullptr, const unsigned int N3=0, const FiniteElement< dim, spacedim > *fe4=nullptr, const unsigned int N4=0, const FiniteElement< dim, spacedim > *fe5=nullptr, const unsigned int N5=0, const bool do_tensor_product=true)
    Use the versions of this function that take a vector of elements or an initializer list as arguments.
    Member FETools::Compositing::compute_restriction_is_additive_flags (const FiniteElement< dim, spacedim > *fe1, const unsigned int N1, const FiniteElement< dim, spacedim > *fe2=nullptr, const unsigned int N2=0, const FiniteElement< dim, spacedim > *fe3=nullptr, const unsigned int N3=0, const FiniteElement< dim, spacedim > *fe4=nullptr, const unsigned int N4=0, const FiniteElement< dim, spacedim > *fe5=nullptr, const unsigned int N5=0)
    @@ -169,9 +169,9 @@
    Member FiniteElement< dim, spacedim >::get_face_data (const UpdateFlags update_flags, const Mapping< dim, spacedim > &mapping, const Quadrature< dim - 1 > &quadrature, internal::FEValuesImplementation::FiniteElementRelatedData< dim, spacedim > &output_data) const
    Use the version taking a hp::QCollection argument.
    Member GridTools::fix_up_distorted_child_cells (const typename Triangulation< dim, spacedim >::DistortedCellList &distorted_cells, Triangulation< dim, spacedim > &triangulation)
This function predates deal.II's use of manifolds and use of cell-local transfinite interpolation to place new points and is no longer necessary. See Manifolds::get_default_points_and_weights() for more information.
    Member GridTools::rotate (const double angle, const unsigned int axis, Triangulation< dim, 3 > &triangulation)
Use the alternative with the unit vector instead.
    Member identity
    Use std_cxx20::type_identity instead.
    Member LinearAlgebra::CUDAWrappers::Vector< Number >::import (const ReadWriteVector< Number > &V, VectorOperation::values operation, std::shared_ptr< const Utilities::MPI::CommunicationPatternBase > communication_pattern={})
    @@ -231,9 +231,9 @@
    Member parallel::fullydistributed::Triangulation< dim, spacedim >::load (const std::string &filename, const bool autopartition) override
    The autopartition parameter has been removed.
    Member ParameterHandler::ShortText
Use ShortPRM instead of ShortText.
    Member ParameterHandler::Text
Use PRM instead of Text.
    Member Particles::ParticleAccessor< dim, spacedim >::set_property_pool (PropertyPool< dim, spacedim > &property_pool)
    This function is only kept for backward compatibility and has no meaning any more. ParticleAccessors always use the property pool of the owning particle handler.
    Member Particles::ParticleHandler< dim, spacedim >::register_load_callback_function (const bool serialization)
    @@ -241,7 +241,7 @@
    Member Particles::ParticleHandler< dim, spacedim >::register_store_callback_function ()
    Please use prepare_for_coarsening_and_refinement() or prepare_for_serialization() instead. See there for further information about the purpose of this function.
    Class PathSearch
Use the std::filesystem facilities instead.
    Member PETScWrappers::SolverBiCG::SolverBiCG (SolverControl &cn, const MPI_Comm mpi_communicator, const AdditionalData &data=AdditionalData())
    Member PETScWrappers::SolverBicgstab::SolverBicgstab (SolverControl &cn, const MPI_Comm mpi_communicator, const AdditionalData &data=AdditionalData())
    @@ -277,7 +277,7 @@
    Member Physics::Transformations::Rotations::rotation_matrix_3d (const Point< 3, Number > &axis, const Number &angle)
    Use the variant with a Tensor as an axis.
    Member PolarManifold< dim, spacedim >::center
Use get_center() instead.
    Member QProjector< dim >::DataSetDescriptor::face (const ReferenceCell &reference_cell, const unsigned int face_no, const bool face_orientation, const bool face_flip, const bool face_rotation, const unsigned int n_quadrature_points)
    Use the version of this function which takes a combined_orientation argument instead.
    Member QProjector< dim >::DataSetDescriptor::face (const ReferenceCell &reference_cell, const unsigned int face_no, const bool face_orientation, const bool face_flip, const bool face_rotation, const hp::QCollection< dim - 1 > &quadrature)
    @@ -295,7 +295,7 @@
    Member SparsityTools::distribute_sparsity_pattern (BlockDynamicSparsityPattern &dsp, const std::vector< IndexSet > &owned_set_per_cpu, const MPI_Comm mpi_comm, const IndexSet &myrange)
    Use the distribute_sparsity_pattern() with a single index set for the present MPI process only.
    Member SphericalManifold< dim, spacedim >::center
Use get_center() instead.
    Member SymmetricTensor< rank_, dim, Number >::begin_raw ()
This function suggests that the elements of a SymmetricTensor object are stored as a contiguous array, but this is not in fact true and one should not pretend that this is so. As a consequence, this function is deprecated.
    Member SymmetricTensor< rank_, dim, Number >::begin_raw () const
    @@ -321,7 +321,7 @@
    Member Utilities::MPI::create_group (const MPI_Comm comm, const MPI_Group &group, const int tag, MPI_Comm *new_comm)
    Use MPI_Comm_create_group directly
    Member Utilities::MPI::RemotePointEvaluation< dim, spacedim >::RemotePointEvaluation (const double tolerance, const bool enforce_unique_mapping=false, const unsigned int rtree_level=0, const std::function< std::vector< bool >()> &marked_vertices={})
    Member XDMFEntry::get_xdmf_content (const unsigned int indent_level, const ReferenceCell &reference_cell) const
    Use the other function instead.
    Member XDMFEntry::XDMFEntry (const std::string &filename, const double time, const std::uint64_t nodes, const std::uint64_t cells, const unsigned int dim)
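Several of these replacements are one-line API swaps. As one hedged example for the ParameterHandler entries above (the parameter name and pattern are made up for illustration):

#include <deal.II/base/parameter_handler.h>
#include <deal.II/base/patterns.h>
#include <iostream>

int main()
{
  dealii::ParameterHandler prm;
  prm.declare_entry("order", "2", dealii::Patterns::Integer(), "FE order");

  // ShortPRM replaces the deprecated ShortText output style.
  prm.print_parameters(std::cout, dealii::ParameterHandler::ShortPRM);
  return 0;
}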
    /usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html 2024-11-15 06:44:24.575628221 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/derivative__form_8h.html 2024-11-15 06:44:24.575628221 +0000 @@ -198,7 +198,7 @@
One of the uses of DerivativeForm is to apply it as a linear transformation. This function returns $\nabla \mathbf F(\mathbf x) \Delta \mathbf x$, which approximates the change in $\mathbf F(\mathbf x)$ when $\mathbf x$ is changed by the amount $\Delta \mathbf x$

\[
\nabla \mathbf F(\mathbf x) \; \Delta \mathbf x
\approx
\mathbf F(\mathbf x + \Delta \mathbf x) - \mathbf F(\mathbf x).
\]
@@ -332,11 +332,11 @@

Similar to the previous apply_transformation(). In matrix notation, it computes $DF2 \, DF1^{T}$. Moreover, the result of this operation $\mathbf A$ can be interpreted as a metric tensor in ${\mathbb R}^\text{spacedim}$ which corresponds to the Euclidean metric tensor in ${\mathbb R}^\text{dim}$. For every pair of vectors $\mathbf u, \mathbf v \in {\mathbb R}^\text{spacedim}$, we have:

\[
\mathbf u \cdot \mathbf A \mathbf v =
\text{DF2}^{-1}(\mathbf u) \cdot \text{DF1}^{-1}(\mathbf v).
\]

    Definition at line 589 of file derivative_form.h.
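A hedged sketch of the first overload in action; the Jacobian entries are made up, and in practice DF would come from a mapping or an FEValues object:

#include <deal.II/base/derivative_form.h>
#include <deal.II/base/tensor.h>

int main()
{
  using namespace dealii;

  DerivativeForm<1, 2, 2> DF; // gradient of some F : R^2 -> R^2
  DF[0][0] = 2.0;             // a diagonal Jacobian, for illustration
  DF[1][1] = 3.0;

  Tensor<1, 2> dx;
  dx[0] = 0.1;
  dx[1] = 0.1;

  // First-order approximation of F(x + dx) - F(x): here (0.2, 0.3).
  const Tensor<1, 2> dF = apply_transformation(DF, dx);
  (void)dF;
  return 0;
}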

/usr/share/doc/packages/dealii/doxygen/deal.II/doxygen_crawl.html differs (HTML document, ASCII text) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/doxygen_crawl.html 2024-11-15 06:44:25.811639260 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/doxygen_crawl.html 2024-11-15 06:44:25.811639260 +0000
/usr/share/doc/packages/dealii/doxygen/deal.II/fe__coupling__values_8h.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/fe__coupling__values_8h.html 2024-11-15 06:44:25.859639689 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/fe__coupling__values_8h.html 2024-11-15 06:44:25.859639689 +0000 @@ -171,11 +171,11 @@

    Quadrature coupling options when assembling quadrature formulas for double integrals.

    When computing the approximation of double integrals of the form

\[
\int_{T_1} \int_{T_2} K(x_1, x_2) f(x_1) g(x_2) \, dT_1 \, dT_2,
\]

where $T_1$ and $T_2$ are two arbitrary sets (cells, faces, edges, or any combination thereof), and $K$ is a (possibly singular) coupling kernel, one needs to combine quadrature formulas from two different FEValuesBase objects.

    This enum class provides a way to specify how the quadrature points and weights should be combined. In general, the two FEValuesBase objects provide different quadrature rules, and these can be interpreted in different ways, depending on the kernel function that is being integrated, and on how the two quadrature rules were constructed.

    This enum is used in the constructor of FECouplingValues to specify how to interpret and manipulate the quadrature points and weights of the two FEValuesBase objects.

    @@ -217,11 +217,11 @@

    DoF coupling options when assembling double integrals.

    When computing the approximation of double integrals of the form

\[
\int_{T_1} \int_{T_2} K(x_1, x_2) v_i(x_1) w_j(x_2) \, dT_1 \, dT_2,
\]

where $T_1$ and $T_2$ are two arbitrary sets (cells, faces, edges, or any combination thereof), and $K$ is a (possibly singular) coupling kernel, one may want to combine degrees of freedom from two different FEValuesBase objects (i.e., basis functions $v_i$ and $w_j$ in the examples above).

    This enum class provides a way to specify how the degrees of freedom should be combined. There are two cases of interest:

    1. the two FEValuesBase objects refer to different DoFHandlers
2. the two FEValuesBase objects refer to the same DoFHandler
@@ -230,14 +230,14 @@

      In the first case, one usually treats the two sets of degrees of freedom as independent of each other, and the resulting matrix is generally rectangular.

      In the second case, one may choose to treat the two sets of degrees of freedom either as independent or to group them together. A similar approach is used in the FEInterfaceValues class, where the degrees of freedom of the two FEValuesBase objects are grouped together, in a contiguous way, so that the resulting basis functions are interpreted in the following way:

\[
\phi_{1,i}(x) = \begin{cases} v_i(x) & \text{ if } i \in [0,n_1) \\
0 & \text{ if } i \in [n_1, n_1+n_2] \end{cases},
\quad
\phi_{2,i}(x) = \begin{cases} 0 & \text{ if } i \in [0,n_1) \\
w_{i-n_1}(x) & \text{ if } i \in [n_1, n_1+n_2] \end{cases},
\]

where $\phi_{1,i}$ is the first basis function with index $i$ and $n_{1,2}$ are the number of local dofs on the first and second FEValuesBase objects.

      This enum is used in the constructor of FECouplingValues to specify how to interpret and manipulate the local dof indices of the two FEValuesBase objects.

    Enumerator
    independent

    The FEValuesBase objects may have different dof indices, possibly indexing different DoFHandler objects, and we are interested in assembling a generally rectangular matrix, where there is no relationship between the two index spaces.

    /usr/share/doc/packages/dealii/doxygen/deal.II/fe__remote__evaluation_8h_source.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/fe__remote__evaluation_8h_source.html 2024-11-15 06:44:25.919640225 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/fe__remote__evaluation_8h_source.html 2024-11-15 06:44:25.923640261 +0000 @@ -949,7 +949,7 @@
    1085 const std::vector<
    -
    1086 std::pair<types::boundary_id, std::function<std::vector<bool>()>>>
    +
    1086 std::pair<types::boundary_id, std::function<std::vector<bool>()>>>
    1087 &non_matching_faces_marked_vertices,
    1088 const unsigned int quad_no,
    1089 const unsigned int dof_no,
    @@ -1095,7 +1095,7 @@
    1229 const std::vector<
    -
    1230 std::pair<types::boundary_id, std::function<std::vector<bool>()>>>
    +
    1230 std::pair<types::boundary_id, std::function<std::vector<bool>()>>>
    1231 &non_matching_faces_marked_vertices,
    1232 const unsigned int n_q_pnts_1D,
    1233 const unsigned int dof_no,
    @@ -1497,7 +1497,6 @@
    static const unsigned int invalid_unsigned_int
    Definition types.h:220
    -
    unsigned int boundary_id
    Definition types.h:144
    std::vector< BoundingBox< boost::geometry::dimension< typename Rtree::indexable_type >::value > > extract_rtree_level(const Rtree &tree, const unsigned int level)
    RTree< typename LeafTypeIterator::value_type, IndexType, IndexableGetter > pack_rtree(const LeafTypeIterator &begin, const LeafTypeIterator &end)
    /usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html 2024-11-15 06:44:25.959640582 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__CPP11.html 2024-11-15 06:44:25.959640582 +0000 @@ -191,7 +191,7 @@

    The macro DEAL_II_CONSTEXPR expands to constexpr if the compiler supports enough constexpr features (such as loops). If the compiler does not, then this macro expands to nothing.

    Functions declared as constexpr can be evaluated at compile time. Hence code like

    constexpr double det_A = determinant(A);
    DEAL_II_HOST constexpr Number determinant(const SymmetricTensor< 2, dim, Number > &)
    assuming A is declared with the constexpr specifier, will typically result in compile-time constants. This example shows the performance gains of using constexpr because here we performed an operation with $O(\text{dim}^3)$ complexity during compile time, avoiding any runtime cost.
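    To make the mechanism concrete, here is a minimal self-contained sketch (not taken from the deal.II sources; det2 and A are illustrative names) of a constexpr function whose result is forced to be computed at compile time:

    // A constexpr 2x2 determinant, evaluated entirely by the compiler:
    constexpr double det2(const double a[2][2])
    {
      return a[0][0] * a[1][1] - a[0][1] * a[1][0];
    }
    constexpr double A[2][2] = {{2.0, 1.0}, {1.0, 3.0}};
    constexpr double det_A = det2(A);                  // compile-time constant
    static_assert(det_A == 5.0, "evaluated at compile time");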

    Function Documentation

    ◆ new_task()

    /usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html 2024-11-15 06:44:25.975640725 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__Concepts.html 2024-11-15 06:44:25.975640725 +0000 @@ -185,7 +185,7 @@
    template <typename VectorType>
    virtual void Tstep(VectorType &u, const VectorType &v) const =0;
    };
    where these two member functions perform one step (or the transpose of such a step) of the smoothing scheme. In other words, the operations performed by these functions are $u = u - P^{-1} (A u - v)$ and $u = u - P^{-T} (A u - v)$.
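    As a concrete (hypothetical) example of a class satisfying this interface, consider a Jacobi-style smoother with $P = \text{diag}(A)$; the vmult(), diag_element(), and size() calls are the usual deal.II matrix/vector operations, but the class itself is only an illustrative sketch:

    template <typename MatrixType, typename VectorType>
    class DiagonalRelaxation
    {
    public:
      explicit DiagonalRelaxation(const MatrixType &A) : A(A) {}

      // One step of u <- u - P^{-1} (A u - v) with P = diag(A).
      void step(VectorType &u, const VectorType &v) const
      {
        VectorType r(u.size());
        A.vmult(r, u);                       // r = A u
        r -= v;                              // r = A u - v
        for (std::size_t i = 0; i < u.size(); ++i)
          u(i) -= r(i) / A.diag_element(i);  // apply P^{-1}
      }

      // Transposed step u <- u - P^{-T} (A u - v); for a diagonal P we have
      // P^{-T} = P^{-1}, so the transposed step coincides with step().
      void Tstep(VectorType &u, const VectorType &v) const { step(u, v); }

    private:
      const MatrixType &A;
    };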

    SparsityPatternType
    /usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html 2024-11-15 06:44:26.055641440 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__LAOperators.html 2024-11-15 06:44:26.055641440 +0000 @@ -337,7 +337,7 @@
    std::function<void(Domain &, const Range &)> Tvmult;
    std::function<void(Domain &, const Range &)> Tvmult_add;

    Thus, such an object can be used as a matrix object in all iterative solver classes, either as a matrix object, or as a preconditioner.

    The big advantage of the LinearOperator class is that it provides syntactic sugar for complex matrix-vector operations. As an example consider the operation $(A+k\,B)\,C$, where $A$, $B$ and $C$ denote (possibly different) SparseMatrix objects. In order to construct a LinearOperator op that performs the above computation when applied to a vector, one can write:

    #include <deal.II/lac/linear_operator_tools.h>
    double k;
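    The fragment above is truncated in this diff; a hedged sketch of how the composition might continue (standard LinearOperator calls, with A, B, C assumed to be previously initialized SparseMatrix objects):

    const auto op_a = linear_operator(A);
    const auto op_b = linear_operator(B);
    const auto op_c = linear_operator(C);
    const auto op   = (op_a + k * op_b) * op_c;  // acts like (A + k B) C on a vector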
    @@ -1470,7 +1470,7 @@

    Return a LinearOperator that performs the operations associated with the Schur complement. There are two additional helper functions, condense_schur_rhs() and postprocess_schur_solution(), that will likely be necessary in order to perform any useful task in linear algebra with this operator.

    We construct the definition of the Schur complement in the following way:

    Consider a general system of linear equations that can be decomposed into two major sets of equations:

@@ -1483,60 +1483,60 @@

\begin{eqnarray*}
 \mathbf{K}\mathbf{d} = \mathbf{f}
 \quad \Rightarrow\quad
 \left(\begin{array}{cc}
    A & B \\ C & D
 \end{array}\right)
 \left(\begin{array}{cc}
    x \\ y
 \end{array}\right)
 =
 \left(\begin{array}{cc}
    f \\ g
 \end{array}\right),
\end{eqnarray*}

    where $ A,B,C,D $ represent general subblocks of the matrix $ \mathbf{K} $ and, similarly, general subvectors of $ \mathbf{d},\mathbf{f} $ are given by $ x,y,f,g $.

    This is equivalent to the following two statements:

\begin{eqnarray*}
  (1) \quad Ax + By &=& f \\
  (2) \quad Cx + Dy &=& g \quad .
\end{eqnarray*}

    Assuming that $ A,D $ are both square and invertible, we could then perform one of two possible substitutions,

\begin{eqnarray*}
  (3) \quad x &=& A^{-1}(f - By) \quad \text{from} \quad (1) \\
  (4) \quad y &=& D^{-1}(g - Cx) \quad \text{from} \quad (2) ,
\end{eqnarray*}

    which amount to performing block Gaussian elimination on this system of equations.

    For the purpose of the current implementation, we choose to substitute (3) into (2)

\begin{eqnarray*}
  C \: A^{-1}(f - By) + Dy &=& g \\
  -C \: A^{-1} \: By + Dy &=& g - C \: A^{-1} \: f \quad .
\end{eqnarray*}

    This leads to the result

\[
   (5) \quad (D - C\: A^{-1} \:B)y  = g - C \: A^{-1} f
       \quad \Rightarrow \quad Sy = g'
\]

    with $ S = (D - C\: A^{-1} \:B) $ being the Schur complement and the modified right-hand side vector $ g' = g - C \: A^{-1} f $ arising from the condensation step. Note that for this choice of $ S $, submatrix $ D $ need not be invertible and may thus be the null matrix. Ideally $ A $ should be well-conditioned.

    So for any arbitrary vector $ a $, the Schur complement performs the following operation:

\[
   (6) \quad Sa = (D - C \: A^{-1} \: B)a
\]

    A typical set of steps needed to solve a linear system (1),(2) would be:

    1. Define the inverse matrix A_inv (using inverse_operator()).
    2. Define the Schur complement $ S $ (using schur_complement()).
    3. Define the iterative inverse matrix $ S^{-1} $ such that (6) holds. It is necessary to use a solver with a preconditioner to compute the approximate inverse operation of $ S $ since we never compute $ S $ directly, but rather the result of its operation. To achieve this, one may again use the inverse_operator() in conjunction with the Schur complement that we've just constructed. Observe that both $ S $ and its preconditioner operate over the same space as $ D $.
    4. Perform the pre-processing step on the RHS of (5) using condense_schur_rhs():

   \[
      g' = g - C \: A^{-1} \: f
   \]

    5. Solve for $ y $ in (5):

   \[
      y =  S^{-1} g'
   \]

    6. Perform the post-processing step from (3) using postprocess_schur_solution():

   \[
      x =  A^{-1} (f - By)
   \]
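    Putting these steps together, a hedged sketch of the whole sequence (the solver and preconditioner objects, the matrix blocks A, B, C, D, and the vectors f, g, x, y are assumed to be set up already; the operator calls themselves are the ones documented on this page):

    const auto A_inv = inverse_operator(linear_operator(A), solver_A, prec_A);
    const auto S     = schur_complement(A_inv, linear_operator(B),
                                        linear_operator(C), linear_operator(D));
    const auto S_inv = inverse_operator(S, solver_S, prec_S);
    const auto rhs   = condense_schur_rhs(A_inv, linear_operator(C), f, g);  // g' of (5)
    y = S_inv * rhs;                                                         // solve (5)
    x = postprocess_schur_solution(A_inv, linear_operator(B), y, f);         // apply (3)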

    @@ -1582,10 +1582,10 @@
    LinearOperator< Domain, Range, Payload > inverse_operator(const LinearOperator< Range, Domain, Payload > &op, Solver &solver, const Preconditioner &preconditioner)
    PackagedOperation< Domain_1 > postprocess_schur_solution(const LinearOperator< Range_1, Domain_1, Payload > &A_inv, const LinearOperator< Range_1, Domain_2, Payload > &B, const Domain_2 &y, const Range_1 &f)
    In the above example, the preconditioner for $ S $ was defined as the preconditioner for $ D $, which is valid since they operate on the same space. However, if $ D $ and $ S $ are too dissimilar, then this may lead to a large number of solver iterations as $ \text{prec}(D) $ is not a good approximation for $ S^{-1} $.

    A better preconditioner in such a case would be one that provides a more representative approximation for $ S^{-1} $. One approach is shown in step-22, where $ D $ is the null matrix and the preconditioner for $ S^{-1} $ is derived from the mass matrix over this space.

    From another viewpoint, a similar result can be achieved by first constructing an object that represents an approximation for $ S $ wherein the expensive operation, namely $ A^{-1} $, is approximated. Thereafter we construct the approximate inverse operator $ \tilde{S}^{-1} $ which is then used as the preconditioner for computing $ S^{-1} $.

    // Construction of approximate inverse of Schur complement
    const auto A_inv_approx = linear_operator(preconditioner_A);
    const auto S_approx = schur_complement(A_inv_approx,B,C,D);
    @@ -1608,8 +1608,8 @@
    // Solve for y
    y = S_inv * rhs;
    x = postprocess_schur_solution (A_inv,B,y,f);
    Note that due to the construction of S_inv_approx and subsequently S_inv, there are a pair of nested iterative solvers which could collectively consume a lot of resources. Therefore care should be taken in the choices leading to the construction of the iterative inverse_operators. One might consider the use of an IterationNumberControl (or a similar mechanism) to limit the number of inner solver iterations. This controls the accuracy of the approximate inverse operation $ \tilde{S}^{-1} $ which acts only as the preconditioner for $ S^{-1} $. Furthermore, the preconditioner to $ \tilde{S}^{-1} $, which in this example is $ \text{prec}(D) $, should ideally be computationally inexpensive.

    However, if an iterative solver based on IterationNumberControl is used as a preconditioner then the preconditioning operation is not a linear operation. Here a flexible solver like SolverFGMRES (flexible GMRES) is best employed as an outer solver in order to deal with the variable behavior of the preconditioner. Otherwise the iterative solver can stagnate somewhere near the tolerance of the preconditioner or generally behave erratically. Alternatively, using a ReductionControl would ensure that the preconditioner always solves to the same tolerance, thereby rendering its behavior constant.

    Further examples of this functionality can be found in the test-suite, such as tests/lac/schur_complement_01.cc. The solution of a multi-component problem (namely step-22) using the schur_complement can be found in tests/lac/schur_complement_03.cc.

    See also
    Block (linear algebra)
    @@ -1646,15 +1646,15 @@
    const Range_2 & g

    For the system of equations

\begin{eqnarray*}
  Ax + By &=& f \\
  Cx + Dy &=& g \quad ,
\end{eqnarray*}

    this operation performs the pre-processing (condensation) step on the RHS subvector g so that the Schur complement can be used to solve this system of equations. More specifically, it produces an object that represents the condensed form of the subvector g, namely

\[
  g' = g - C \: A^{-1} \: f
\]

    See also
    Block (linear algebra)
    @@ -1690,15 +1690,15 @@
    const Range_1 & f

    For the system of equations

\begin{eqnarray*}
  Ax + By &=& f \\
  Cx + Dy &=& g \quad ,
\end{eqnarray*}

    this operation performs the post-processing step of the Schur complement to solve for the second subvector x once subvector y is known, with the result that

\[
  x =  A^{-1}(f - By)
\]

    See also
    Block (linear algebra)
    @@ -3069,7 +3069,7 @@

    Return a LinearOperator that performs the operations associated with the Schur complement. There are two additional helper functions, condense_schur_rhs() and postprocess_schur_solution(), that will likely be necessary in order to perform any useful task in linear algebra with this operator.

    We construct the definition of the Schur complement in the following way:

    Consider a general system of linear equations that can be decomposed into two major sets of equations:

@@ -3082,60 +3082,60 @@

\begin{eqnarray*}
 \mathbf{K}\mathbf{d} = \mathbf{f}
 \quad \Rightarrow\quad
 \left(\begin{array}{cc}
    A & B \\ C & D
 \end{array}\right)
 \left(\begin{array}{cc}
    x \\ y
 \end{array}\right)
 =
 \left(\begin{array}{cc}
    f \\ g
 \end{array}\right),
\end{eqnarray*}

    where $ A,B,C,D $ represent general subblocks of the matrix $ \mathbf{K} $ and, similarly, general subvectors of $ \mathbf{d},\mathbf{f} $ are given by $ x,y,f,g $.

    This is equivalent to the following two statements:

\begin{eqnarray*}
  (1) \quad Ax + By &=& f \\
  (2) \quad Cx + Dy &=& g \quad .
\end{eqnarray*}

    Assuming that $ A,D $ are both square and invertible, we could then perform one of two possible substitutions,

\begin{eqnarray*}
  (3) \quad x &=& A^{-1}(f - By) \quad \text{from} \quad (1) \\
  (4) \quad y &=& D^{-1}(g - Cx) \quad \text{from} \quad (2) ,
\end{eqnarray*}

    /usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html 2024-11-15 06:44:26.079641654 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__UpdateFlags.html 2024-11-15 06:44:26.079641654 +0000 @@ -137,7 +137,7 @@

    where $q$ indicates the index of the quadrature point, $\hat{\bf x}_q$ its location on the reference cell, and $w_q$ its weight.

    In order to evaluate such an expression in an application code, we have to access three different kinds of objects: a quadrature object that describes locations $\hat{\bf x}_q$ and weights $w_q$ of quadrature points on the reference cell; a finite element object that describes the gradients $\hat\nabla \varphi_i(\hat{\bf x}_q)$ of shape functions on the unit cell; and a mapping object that provides the Jacobian as well as its determinant. Dealing with all these objects would be cumbersome and error prone.

    On the other hand, these three kinds of objects almost always appear together, and it is in fact very rare for deal.II application codes to do anything with quadrature, finite element, or mapping objects besides using them together. For this reason, deal.II uses the FEValues abstraction combining information on the shape functions, the geometry of the actual mesh cell and a quadrature rule on a reference cell. Upon construction it takes one object of each of the three mentioned categories. Later, it can be "re-initialized" for a concrete grid cell and then provides mapped quadrature points and weights, mapped shape function values and derivatives as well as some properties of the transformation from the reference cell to the actual mesh cell.
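    The pattern just described looks roughly as follows in practice; this is a hedged sketch using the standard deal.II calls (fe, mapping, and dof_handler are assumed to exist):

    QGauss<dim>   quadrature(fe.degree + 1);
    FEValues<dim> fe_values(mapping, fe, quadrature,
                            update_values | update_gradients |
                            update_JxW_values | update_quadrature_points);
    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        fe_values.reinit(cell);  // map points, weights, and shape data to this cell
        for (const unsigned int q : fe_values.quadrature_point_indices())
          for (const unsigned int i : fe_values.dof_indices())
            {
              // use fe_values.shape_grad(i, q), fe_values.JxW(q), ... here
            }
      }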

    /usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html 2024-11-15 06:44:26.103641868 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__auto__symb__diff.html 2024-11-15 06:44:26.103641868 +0000 @@ -116,7 +116,7 @@
    &#href_anchor"memitem:">namespace  Differentiation::SD
    &#href_anchor"details" id="details">

    Detailed Description

    A group dedicated to the implementation of functions and classes that relate to automatic and symbolic differentiation.

    Below we provide a very brief introduction as to what automatic and symbolic differentiation are, what variations of these computational/numerical schemes exist, and how they are integrated within deal.II's framework. The purpose of all of these schemes is to automatically compute the derivative of functions, or approximations of it, in cases where one does not want to compute them by hand. Common examples in the finite element context are situations where one wants to solve a nonlinear problem that is given by requiring that some residual $F(u,\nabla u)=0$, where $F$ is a complicated function that needs to be differentiated to apply Newton's method; and situations where one is given a parameter dependent problem ${\cal A}(q,u,\nabla u) = f$ and wants to form derivatives with regards to the parameters $q$, for example to optimize an output functional with regards to $q$, or for a sensitivity analysis with regards to $q$. One should think of $q$ as design parameters: say, the width or shape of a wing, the stiffness coefficients of a material chosen to build an object, the power sent to a device, the chemical composition of the gases sent to a burner. In all of these cases, one should think of $F$ and $\cal A$ as complicated and cumbersome to differentiate – at least when doing it by hand. A relatively simple case of a nonlinear problem that already highlights the tedium of computing derivatives by hand is shown in step-15. However, in reality, one might, for example, think about problems such as chemically reactive flows where the fluid equations have coefficients such as the density and viscosity that depend strongly and nonlinearly on the chemical composition, temperature, and pressure of the fluid at each point; and where the chemical species react with each other based on reaction coefficients that also depend nonlinearly and in complicated ways on the chemical composition, temperature, and pressure. In many cases, the exact formulas for all of these coefficients can take several lines to write out, may include exponentials and (harmonic or geometric) averages of several nonlinear terms, and/or may contain table lookup of and interpolation between data points. Just getting these terms right is difficult enough; computing derivatives of these terms is impractical in most applications and, in reality, impossible to get right. Higher derivatives are even more impossible to do without computer aid. Automatic or symbolic differentiation is a way out of this: One only has to implement the function that computes these coefficients in terms of their inputs only once, and gets the (correct!) derivatives without further coding effort (though at a non-negligible computational cost either at run time, compile time, or both).

    Automatic differentiation

    Automatic differentiation (commonly also referred to as algorithmic differentiation), is a numerical method that can be used to "automatically" compute the first, and perhaps higher-order, derivatives of function(s) with respect to one or more input variables. Although this comes at a certain computational cost, the benefits to using such a tool may be significant. When used correctly the derivatives of often complicated functions can be computed to a very high accuracy. Although the exact accuracy achievable by these frameworks largely depends on their underlying mathematical formulation, some implementations compute with a precision on the order of machine accuracy. Note that this is different to classical numerical differentiation (using, for example, a finite difference approximation of a function by evaluating it at different points), which has an accuracy that depends on both the perturbation size as well as the chosen finite-difference scheme; the error of these methods is measurably larger than well-formulated automatic differentiation approaches.

    @@ -164,38 +164,38 @@
  • reverse-mode (or reverse accumulation) auto-differentiation.
  • As a point of interest, the optimal Jacobian accumulation, which performs a minimal set of computations, lies somewhere between these two limiting cases. Its computation for a general composite function remains an open problem in graph theory.

    With the aid of the diagram below (it and some of the listed details courtesy of this Wikipedia article), let us think about the representation of the calculation of the function $f (\mathbf{x}) = \sin (x_{1}) + x_{1} x_{2}$ and its derivatives:

    [Figures: forward mode automatic differentiation; reverse mode automatic differentiation]

    Specifically, we will briefly describe what forward and reverse auto-differentiation are. Note that in the diagram, along the edges of the graph in text are the directional derivative of function $w$ with respect to the $i$-th variable, represented by the notation $\dot{w} = \dfrac{d w}{d x_{i}}$. The specific computations used to render the function value and its directional derivatives for this example are tabulated in the source article. For a second illustrative example, we refer the interested reader to this article.

    Consider first that any composite function $f(x)$, here represented as having two independent variables, can be dissected into a composition of its elementary functions

\[
  f (\mathbf{x})
  = f_{0} \circ f_{1} \circ f_{2} \circ \ldots \circ f_{n} (\mathbf{x})
  \quad .
\]

    As was previously mentioned, if each of the primitive operations $f_{n}$ is smooth and differentiable, then the chain-rule can be universally employed to compute the total derivative of $f$, namely $\dfrac{d f(x)}{d \mathbf{x}}$. What distinguishes the "forward" from the "reverse" mode is how the chain-rule is evaluated, but ultimately both compute the total derivative

\[
  \dfrac{d f (\mathbf{x})}{d \mathbf{x}}
  = \dfrac{d f_{0}}{d f_{1}} \dfrac{d f_{1}}{d f_{2}} \dfrac{d f_{2}}{d f_{3}} \ldots \dfrac{d f_{n} (\mathbf{x})}{d \mathbf{x}}
  \quad .
\]

    In forward-mode, the chain-rule is computed naturally from the "inside out". The independent variables are therefore fixed, and each sub-function $f'_{i} \vert_{f'_{i+1}}$ is computed recursively and its result returned as inputs to the parent function. Encapsulating and fixing the order of operations using parentheses, this means that we compute

\[
  \dfrac{d f (\mathbf{x})}{d \mathbf{x}}
  = \dfrac{d f_{0}}{d f_{1}} \left( \dfrac{d f_{1}}{d f_{2}} \left(\dfrac{d f_{2}}{d f_{3}} \left(\ldots \left( \dfrac{d f_{n} (\mathbf{x})}{d \mathbf{x}} \right)\right)\right)\right)
  \quad .
\]

    The computational complexity of a forward-sweep is proportional to that of the input function. However, for each directional derivative that is to be computed one sweep of the computational graph is required.

    In reverse-mode, the chain-rule is computed somewhat unnaturally from the "outside in". The values of the dependent variables first get computed and fixed, and then the preceding differential operations are evaluated and multiplied in succession with the previous results from left to right. Again, if we encapsulate and fix the order of operations using parentheses, this implies that the reverse calculation is performed by

\[
  \dfrac{d f (\mathbf{x})}{d \mathbf{x}}
  = \left( \left( \left( \left( \left( \dfrac{d f_{0}}{d f_{1}} \right) \dfrac{d f_{1}}{d f_{2}} \right) \dfrac{d f_{2}}{d f_{3}} \right) \ldots \right) \dfrac{d f_{n} (\mathbf{x})}{d \mathbf{x}} \right)
  \quad .
\]

    The intermediate values $\dfrac{d f_{i-1}}{d f_{i}}$ are known as adjoints, which must be computed and stored as the computational graph is traversed. However, for each dependent scalar function one sweep of the computational graph renders all directional derivatives at once.

    Overall, the efficiency of each mode is determined by the number of independent (input) variables and dependent (output) variables. If the outputs greatly exceed the inputs in number, then forward-mode can be shown to be more efficient than reverse-mode. The converse is true when the number of input variables greatly exceeds that of the output variables. This point may be used to help inform which number type is most suitable for which set of operations are to be performed using automatic differentiation. For example, in many applications for which second derivatives are to be computed it is appropriate to combine both reverse- and forward-modes. The former would then typically be used to calculate the first derivatives, and the latter the second derivatives.
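    To make the forward mode tangible, here is a small self-contained C++ sketch (illustrative only, not a deal.II interface) of forward-mode AD with dual numbers, applied to the function $f (\mathbf{x}) = \sin (x_{1}) + x_{1} x_{2}$ from the diagram above:

    #include <cmath>
    #include <iostream>

    // A dual number carries a value and one directional derivative; every
    // overloaded operation propagates both, which is exactly one forward sweep.
    struct Dual
    {
      double val;
      double dot;
    };

    Dual operator+(Dual a, Dual b) { return {a.val + b.val, a.dot + b.dot}; }
    Dual operator*(Dual a, Dual b) { return {a.val * b.val, a.dot * b.val + a.val * b.dot}; }
    Dual sin(Dual a) { return {std::sin(a.val), std::cos(a.val) * a.dot}; }

    int main()
    {
      // Seeding x1 with dot = 1 yields df/dx1; a second sweep with the seed
      // on x2 would be needed for df/dx2 (the per-derivative cost noted above).
      Dual x1{2.0, 1.0}, x2{3.0, 0.0};
      const Dual f = sin(x1) + x1 * x2;
      std::cout << "f = " << f.val << ", df/dx1 = " << f.dot << '\n';
    }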

    Supported automatic differentiation libraries

    @@ -343,7 +343,7 @@

    Symbolic expressions and differentiation

    Symbolic differentiation is, in terms of its design and usage, quite different to automatic differentiation. Underlying any symbolic library is a computer algebra system (CAS) that implements a language and collection of algorithms to manipulate symbolic (or "string-like") expressions. This is most similar, from a philosophical point of view, to how algebraic operations would be performed by hand.

    To help better distinguish between symbolic differentiation and numerical methods like automatic differentiation, let's consider a very simple example. Suppose we have the function $f(x,y) = [2x+1]^{y}$, where $x$ and $y$ are variables that are independent of one another. By applying the chain-rule, the derivatives of this function are simply $\dfrac{d f(x,y)}{d x} = 2y[2x+1]^{y-1}$ and $\dfrac{d f(x,y)}{d y} = [2x+1]^{y} \ln(2x+1)$. These are exactly the results that you get from a CAS after defining the symbolic variables x and y, defining the symbolic expression f = pow(2x+1, y) and computing the derivatives diff(f, x) and diff(f, y). At this point there is no assumption of what x and y represent; they may later be interpreted as plain (scalar) numbers, complex numbers, or something else for which the power and natural logarithm functions are well defined. Obviously this means that there is also no assumption about which point to evaluate either the expression or its derivatives. One could readily take the expression for $\dfrac{d f(x, y)}{d x}$ and evaluate it at $x=1, y=2.5$ and then later, with no recomputation of the derivative expression itself, evaluate it at $x=3.25, y=-6$. In fact, the interpretation of any symbolic variable or expression, and the inter-dependencies between variables, may be defined or redefined at any point during their manipulation; this leads to a degree of flexibility in computations that cannot be matched by auto-differentiation. For example, one could perform the permanent substitution $g(x) = \dfrac{d f(x, y)}{d x} \vert_{y=1}$ and then recompute $g(x)$ for several different values of $x$. One could also post-factum express an interdependency between x and y, such as $y \rightarrow y(x) := 2x$. For such a case, this means that the initially computed derivatives $\dfrac{d f(x, y)}{d x} \rightarrow \dfrac{\partial f(x, y(x))}{\partial x} = 2y(x) [2x+1]^{y(x)-1} = 4x[2x+1]^{2x-1}$ and $\dfrac{d f(x, y)}{d y} \rightarrow \dfrac{\partial f(x, y(x))}{\partial y} = [2x+1]^{y(x)} \ln(2x+1) = [2x+1]^{2x} \ln(2x+1)$ truly represent partial derivatives rather than total derivatives. Of course, if such an inter-dependency was explicitly defined before the derivatives $\dfrac{d f(x, y(x))}{d x}$ and $\dfrac{d f(x, y(x))}{d y}$ are computed, then this could correspond to the total derivative (which is the only result that auto-differentiation is able to achieve for this example).
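    In deal.II's wrappers the example just discussed would look roughly as follows; make_symbol(), differentiate(), and the pow() overload for Expression are used here as suggested by the text above, but treat the exact calls as assumptions rather than verified API:

    namespace SD = dealii::Differentiation::SD;

    const SD::Expression x = SD::make_symbol("x");
    const SD::Expression y = SD::make_symbol("y");
    const SD::Expression f = pow(2 * x + 1, y);           // f(x,y) = [2x+1]^y
    const SD::Expression df_dx = SD::differentiate(f, x); // 2y[2x+1]^{y-1}
    const SD::Expression df_dy = SD::differentiate(f, y); // [2x+1]^y ln(2x+1)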

    Due to the sophisticated CAS that forms the foundation of symbolic operations, the types of manipulations are not necessarily restricted to differentiation alone, but rather may span a spectrum of manipulations relevant to discrete differential calculus, topics in pure mathematics, and more. The documentation for the SymPy library gives plenty of examples that highlight what a fully-fledged CAS is capable of. Through the Differentiation::SD::Expression class, and the associated functions in the Differentiation::SD namespace, we provide a wrapper to the high-performance SymEngine symbolic manipulation library that has enriched operator overloading and a consistent interface that makes it easy and "natural" to use. In fact, this class can be used as a "drop-in" replacement for arithmetic types in many situations, transforming the operations from being numeric to symbolic in nature; this is made especially easy when classes are templated on the underlying number type. Being focused on numerical simulation of PDEs, the functionality of the CAS that is exposed within deal.II focuses on symbolic expression creation, manipulation, and differentiation.

    The convenience wrappers to SymEngine functionality are primarily focused on manipulations that solely involve dictionary-based (i.e., something reminiscent of "string-based") operations. Although SymEngine performs these operations in an efficient manner, they are still known to be computationally expensive, especially when the operations are performed on large expressions. It should therefore be expected that the performance of the parts of code that perform differentiation, symbolic substitution, etc., may be a limiting factor when using this in production code. deal.II therefore provides an interface to accelerate the evaluation of lengthy symbolic expressions through the BatchOptimizer class (itself often leveraging functionality provided by SymEngine). In particular, the BatchOptimizer simultaneously optimizes a collection of symbolic expressions using methods such as common subexpression elimination (CSE), as well as by generating high performance code-paths to evaluate these expressions through the use of a custom-generated std::function or by compiling the expression using the LLVM JIT compiler. The usage of the Differentiation::SD::BatchOptimizer class is exemplified in step-71.

    As a final note, it is important to recognize the remaining major deficiencies in deal.II's current implementation of the interface to the supported symbolic library. The level of functionality currently implemented effectively limits the use of symbolic algebra to the traditional use case (i.e. scalar and tensor algebra, as might be useful to define constitutive relations or complex functions for application as boundary conditions or source terms). In fact, step-71 demonstrates how it can be used to implement challenging constitutive models. In the future we will also implement classes to assist in performing assembly operations in the same spirit as that which has been done in the Differentiation::AD namespace.

    /usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html 2024-11-15 06:44:26.171642475 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__constraints.html 2024-11-15 06:44:26.171642475 +0000 @@ -216,7 +216,7 @@
  • If you have boundary conditions that set a certain part of the solution's value, for example no normal flux, $\mathbf n \cdot \mathbf u=0$ (as happens in flow problems and is handled by the VectorTools::compute_no_normal_flux_constraints function) or prescribed tangential components, $\mathbf{n}\times\mathbf{u}=\mathbf{n}\times\mathbf{f}$ (as happens in electromagnetic problems and is handled by the VectorTools::project_boundary_values_curl_conforming function). For the former case, imagine for example that we are at a vertex where the normal vector has the form $\frac 1{\sqrt{14}} (1,2,3)^T$ and that the $x$-, $y$- and $z$-components of the flow field at this vertex are associated with degrees of freedom 12, 28, and 40. Then the no-normal-flux condition means that we need to have the condition $\frac 1{\sqrt{14}} (x_{12}+2x_{28}+3x_{40})=0$. The prescribed tangential component leads to similar constraints though there is often something on the right hand side.
  • If you have hanging node constraints, for example in a mesh like this:
    @@ -309,7 +309,7 @@

    instead (see, for example, [Shephard1984]).

    Here, $A$ is a given (unconstrained) system matrix for which we only assume that we can apply it to a vector but can not necessarily access individual matrix entries. $b$ is the corresponding right hand side of a system of linear equations $A\,x=b$. The matrix $C$ describes the homogeneous part of the linear constraints stored in an AffineConstraints object and the vector $k$ is the vector of corresponding inhomogeneities. More precisely, the AffineConstraints::distribute() operation applied on a vector $x$ is the operation

\[
  x \leftarrow C\,x+k.
\]

@@ -376,7 +376,7 @@

  • Compute which entries of a matrix built on the given dof_handler may possibly be nonzero, and create a sparsity pattern object that represents these nonzero locations.

    This function computes the possible positions of non-zero entries in the global system matrix by simulating which entries one would write to during the actual assembly of a matrix. For this, the function assumes that each finite element basis function is non-zero on a cell only if its degree of freedom is associated with the interior, a face, an edge or a vertex of this cell. As a result, a matrix entry $A_{ij}$ that is computed from two basis functions $\varphi_i$ and $\varphi_j$ with (global) indices $i$ and $j$ (for example, using a bilinear form $A_{ij}=a(\varphi_i,\varphi_j)$) can be non-zero only if these shape functions correspond to degrees of freedom that are defined on at least one common cell. Therefore, this function just loops over all cells, figures out the global indices of all degrees of freedom, and presumes that all matrix entries that couple any of these indices will result in a nonzero matrix entry. These will then be added to the sparsity pattern. As this process of generating the sparsity pattern does not take into account the equation to be solved later on, the resulting sparsity pattern is symmetric.

    This algorithm makes no distinction between shape functions on each cell, i.e., it simply couples all degrees of freedom on a cell with all other degrees of freedom on a cell. This is often the case, and always a safe assumption. However, if you know something about the structure of your operator and that it does not couple certain shape functions with certain test functions, then you can get a sparser sparsity pattern by calling a variant of the current function described below that allows to specify which vector components couple with which other vector components.

    The method described above lives on the assumption that coupling between degrees of freedom only happens if shape functions overlap on at least one cell. This is the case with most usual finite element formulations involving conforming elements. However, for formulations such as the Discontinuous Galerkin finite element method, the bilinear form contains terms on interfaces between cells that couple shape functions that live on one cell with shape functions that live on a neighboring cell. The current function would not see these couplings, and would consequently not allocate entries in the sparsity pattern. You would then get into trouble during matrix assembly because you try to write into matrix entries for which no space has been allocated in the sparsity pattern. This can be avoided by calling the DoFTools::make_flux_sparsity_pattern() function instead, which takes into account coupling between degrees of freedom on neighboring cells.

    There are other situations where bilinear forms contain non-local terms, for example in treating integral equations. These require different methods for building the sparsity patterns that depend on the exact formulation of the problem. You will have to do this yourself then.
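    In code, the workflow described here typically looks as follows; a hedged sketch with standard deal.II calls (dof_handler and the AffineConstraints object constraints are assumed to be set up):

    DynamicSparsityPattern dsp(dof_handler.n_dofs());
    DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints,
                                    /*keep_constrained_dofs=*/false);
    // For DG-type couplings across faces one would instead call
    // DoFTools::make_flux_sparsity_pattern(dof_handler, dsp);
    SparsityPattern sparsity_pattern;
    sparsity_pattern.copy_from(dsp);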

    @@ -446,7 +446,7 @@

\begin{align*}
  -\Delta \mathbf u + \nabla p &= 0,\\
  \text{div}\ u &= 0
\end{align*}

    in two space dimensions, using stable Q2/Q1 mixed elements (using the FESystem class), then you don't want all degrees of freedom to couple in each equation. More specifically, in the first equation, only $u_x$ and $p$ appear; in the second equation, only $u_y$ and $p$ appear; and in the third equation, only $u_x$ and $u_y$ appear. (Note that this discussion only talks about vector components of the solution variable and the different equation, and has nothing to do with degrees of freedom, or in fact with any kind of discretization.) We can describe this by the following pattern of "couplings":

\[
 \left[
 \begin{array}{ccc}
   1 & 0 & 1 \\
   0 & 1 & 1 \\
   1 & 1 & 0
 \end{array}
 \right]
\]

@@ -763,9 +763,9 @@

    LinearOperator< Range, Domain, Payload > distribute_constraints_linear_operator(const AffineConstraints< typename Range::value_type > &constraints, const LinearOperator< Range, Domain, Payload > &exemplar)

    and Id_c is the projection to the subspace consisting of all vector entries associated with constrained degrees of freedom.

    This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

\[
   (C^T A C + Id_c) x = C^T (b - A\,k)
\]

    with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

    A detailed explanation of this approach is given in the Constraints on degrees of freedom topic.
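    In code, the modified system above can be assembled with the helpers this page refers to; a hedged sketch (A, b, and the AffineConstraints object constraints are assumed to be set up):

    const auto op_a    = linear_operator(A);
    const auto op_amod = constrained_linear_operator(constraints, op_a);     // C^T A C + Id_c
    const auto rhs_mod = constrained_right_hand_side(constraints, op_a, b);  // C^T (b - A k)
    // After solving op_amod * x = rhs_mod, distribute the constraints:
    // constraints.distribute(x);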

    @@ -801,9 +801,9 @@

    with

    This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

\[
   (C^T A C + Id_c) x = C^T (b - A\,k)
\]

    with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

    A detailed explanation of this approach is given in the Constraints on degrees of freedom topic.

    @@ -1161,25 +1161,25 @@

    This function is an updated version of the project_boundary_values_curl_conforming function. The intention is to fix a problem when using the previous function in conjunction with non-rectangular geometries (i.e. elements with non-rectangular faces). The L2-projection method used has been taken from the paper "Electromagnetic scattering simulation using an H (curl) conforming hp-finite element method in three dimensions" by PD Ledger, K Morgan and O Hassan ( Int. J. Num. Meth. Fluids, Volume 53, Issue 8, pages 1267-1296).

    This function will compute constraints that correspond to Dirichlet boundary conditions of the form $\vec{n}\times\vec{E}=\vec{n}\times\vec{F}$, i.e., the tangential components of $\vec{E}$ and $\vec{F}$ shall coincide.

    Computing constraints

    To compute the constraints we use a projection method based upon the paper mentioned above. In 2d this is done in a single stage for the edge-based shape functions, regardless of the order of the finite element. In 3d this is done in two stages, edges first and then faces.

    For each cell, each edge, $e$, is projected by solving the linear system $Ax=b$ where $x$ is the vector of constraints on degrees of freedom on the edge and

    $A_{ij} = \int_{e} (\vec{s}_{i}\cdot\vec{t})(\vec{s}_{j}\cdot\vec{t}) dS$

    $b_{i} = \int_{e} (\vec{s}_{i}\cdot\vec{t})(\vec{F}\cdot\vec{t}) dS$

    with $\vec{s}_{i}$ the $i^{th}$ shape function and $\vec{t}$ the tangent vector.

    Once all edge constraints, $x$, have been computed, we may compute the face constraints in a similar fashion, taking into account the residuals from the edges.

    For each face on the cell, $f$, we solve the linear system $By=c$ where $y$ is the vector of constraints on degrees of freedom on the face and

    $B_{ij} = \int_{f} (\vec{n} \times \vec{s}_{i}) \cdot (\vec{n} \times \vec{s}_{j}) dS$

    $c_{i} = \int_{f} (\vec{n} \times \vec{r}) \cdot (\vec{n} \times \vec{s}_i) dS$

    and $\vec{r} = \vec{F} - \sum_{e \in f} \sum_{i \in e} x_{i}\vec{s}_i$, the edge residual.

    The resulting constraints are then given in the solutions $x$ and $y$.

    If the AffineConstraints constraints contained values or other constraints before, the new ones are added or the old ones overwritten, if a node of the boundary part to be used was already in the list of constraints. This is handled by using inhomogeneous constraints. Please note that when combining adaptive meshes and this kind of constraints, the Dirichlet conditions should be set first, and then completed by hanging node constraints, in order to make sure that the discretization remains consistent. See the discussion on conflicting constraints in the topic on Constraints on degrees of freedom.

    Arguments to this function

This function is explicitly for use with FE_Nedelec elements, or with FESystem elements which contain FE_Nedelec elements. It will throw an exception if called with any other finite element. The user must ensure that FESystem elements are correctly set up when using this function, as this check is not possible in this case.

The second argument of this function denotes the first vector component of the finite element which corresponds to the vector function that you wish to constrain. For example, if we are solving Maxwell's equations in 3d and have components $(E_x,E_y,E_z,B_x,B_y,B_z)$ and we want the boundary conditions $\vec{n}\times\vec{B}=\vec{n}\times\vec{f}$, then first_vector_component would be 3. The boundary_function must return 6 components in this example, with the first 3 corresponding to $\vec{E}$ and the second 3 corresponding to $\vec{B}$. Vectors are implicitly assumed to have exactly dim components that are ordered in the same way as we usually order the coordinate directions, i.e. $x$-, $y$-, and finally $z$-component.

    The parameter boundary_component corresponds to the number boundary_id of the face. numbers::internal_face_boundary_id is an illegal value, since it is reserved for interior faces.

The last argument, the mapping, is used to compute the normal vector $\vec{n}$ at the boundary points.

    See also
    Glossary entry on boundary indicators
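A hedged usage sketch for the 3d Maxwell example above (the 6-component Function<3> `boundary_values` and the DoFHandler `dof_handler` are assumed to be set up by the user; argument order follows the description in this section):

AffineConstraints<double> constraints;
VectorTools::project_boundary_values_curl_conforming_l2(
  dof_handler,
  /*first_vector_component=*/3,   // the B field starts at component 3
  boundary_values,
  /*boundary_component=*/0,       // boundary_id of the faces to constrain
  constraints,
  MappingQ<3>(1));
constraints.close();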
@@ -1264,10 +1264,10 @@ const Mapping< dim > & mapping

Compute constraints that correspond to boundary conditions of the form $\vec{n}^T\vec{u}=\vec{n}^T\vec{f}$, i.e. the normal components of the solution $u$ and a given $f$ shall coincide. The function $f$ is given by boundary_function and the resulting constraints are added to constraints for faces with boundary indicator boundary_component.

This function is explicitly written for use with FE_RaviartThomas elements. Thus it throws an exception if it is called with other finite elements.

    If the AffineConstraints object constraints contained values or other constraints before, the new ones are added or the old ones overwritten, if a node of the boundary part to be used was already in the list of constraints. This is handled by using inhomogeneous constraints. Please note that when combining adaptive meshes and this kind of constraints, the Dirichlet conditions should be set first, and then completed by hanging node constraints, in order to make sure that the discretization remains consistent. See the discussion on conflicting constraints in the topic on Constraints on degrees of freedom.

The argument first_vector_component denotes the first vector component in the finite element that corresponds to the vector function $\vec{u}$ that you want to constrain. Vectors are implicitly assumed to have exactly dim components that are ordered in the same way as we usually order the coordinate directions, i.e., $x$-, $y$-, and finally $z$-component.

    The parameter boundary_component corresponds to the boundary_id of the faces where the boundary conditions are applied. numbers::internal_face_boundary_id is an illegal value, since it is reserved for interior faces. The mapping is used to compute the normal vector $\vec{n}$ at the boundary points.

    Computing constraints

To compute the constraints we use the interpolation operator proposed in Brezzi, Fortin (Mixed and Hybrid Finite Element Methods, Springer, 1991) on every face located at the boundary.
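A hedged sketch of the call (assuming a DoFHandler built on an FE_RaviartThomas element and a dim-component Function<dim> `boundary_values`; names are illustrative):

AffineConstraints<double> constraints;
VectorTools::project_boundary_values_div_conforming(
  dof_handler,
  /*first_vector_component=*/0,
  boundary_values,
  /*boundary_component=*/0,
  constraints,
  MappingQ<dim>(1));
constraints.close();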

    @@ -1363,8 +1363,8 @@

    This function treats a more general case than VectorTools::compute_no_normal_flux_constraints() (which can only handle the case where $\vec u_\Gamma \cdot \vec n = 0$, and is used in step-31 and step-32). However, because everything that would apply to that function also applies as a special case to the current function, the following discussion is relevant to both.

    Note
    This function doesn't make much sense in 1d, so it throws an exception if dim equals one.

    Arguments to this function

The second argument of this function denotes the first vector component in the finite element that corresponds to the vector function that you want to constrain. For example, if we were solving a Stokes equation in 2d and the finite element had components $(u,v,p)$, then first_vector_component needs to be zero if you intend to constrain the vector $(u,v)^T \cdot \vec n = \vec u_\Gamma \cdot \vec n$. On the other hand, if we solved the Maxwell equations in 3d and the finite element has components $(E_x,E_y,E_z,B_x,B_y,B_z)$ and we want the boundary condition $\vec B\cdot \vec n=\vec B_\Gamma\cdot \vec n$, then first_vector_component would be 3. Vectors are implicitly assumed to have exactly dim components that are ordered in the same way as we usually order the coordinate directions, i.e. $x$-, $y$-, and finally $z$-component. The function assumes, but can't check, that the vector components in the range [first_vector_component,first_vector_component+dim) come from the same base finite element. For example, in the Stokes example above, it would not make sense to use a FESystem<dim>(FE_Q<dim>(2), 1, FE_Q<dim>(1), dim) (note that the first velocity vector component is a $Q_2$ element, whereas all the other ones are $Q_1$ elements) as there would be points on the boundary where the $x$-velocity is defined but no corresponding $y$- or $z$-velocities.

The third argument denotes the set of boundary indicators on which the boundary condition is to be enforced. Note that, as explained below, this is one of the few functions where it makes a difference whether we call the function multiple times with only one boundary indicator each, or whether we call the function once with the whole set of boundary indicators at once.

Argument four (function_map) describes the boundary function $\vec u_\Gamma$ for each boundary id. The function function_map[id] is used on boundary with id id taken from the set boundary_ids. Each function in function_map is expected to have dim components, which are used independent of first_vector_component.
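A hedged sketch for the 2d Stokes example above (components $(u,v,p)$, velocity starting at component 0; `inflow_profile` is an illustrative dim-component Function<dim> supplied by the user):

std::set<types::boundary_id> boundary_ids = {1};
std::map<types::boundary_id, const Function<dim> *> function_map;
function_map[1] = &inflow_profile; // prescribes u_Gamma on boundary id 1

AffineConstraints<double> constraints;
VectorTools::compute_nonzero_normal_flux_constraints(
  dof_handler,
  /*first_vector_component=*/0,
  boundary_ids,
  function_map,
  constraints,
  mapping);
constraints.close();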

    @@ -1756,9 +1756,9 @@
    Id_c = project_to_constrained_linear_operator(constraints, linop);

    and Id_c is the projection to the subspace consisting of all vector entries associated with constrained degrees of freedom.

    This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

\[
   (C^T A C + Id_c) x = C^T (b - A\,k)
\]

    with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

    A detailed explanation of this approach is given in the Constraints on degrees of freedom topic.

    @@ -1801,9 +1801,9 @@

    with

    This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

\[
   (C^T A C + Id_c) x = C^T (b - A\,k)
\]

    with a given (unconstrained) system matrix $A$, right hand side $b$, and linear constraints $C$ with inhomogeneities $k$.

    A detailed explanation of this approach is given in the Constraints on degrees of freedom topic.

@@ -2012,7 +2012,7 @@ … every function $v_h = \sum_j V_j \varphi_j^{(1)} \in {\cal V}_1$ that also satisfies $v_h\in {\cal V}_0$ automatically satisfies $CV=0$. This function computes the matrix $C$ in the form of an AffineConstraints object.

    The construction of these constraints is done as follows: for each of the degrees of freedom (i.e. shape functions) on the coarse grid, we compute its representation on the fine grid, i.e. how the linear combination of shape functions on the fine grid looks like that resembles the shape function on the coarse grid. From this information, we can then compute the constraints which have to hold if a solution of a linear equation on the fine grid shall be representable on the coarse grid. The exact algorithm how these constraints can be computed is rather complicated and is best understood by reading the source code, which contains many comments.

The use of this function is as follows: it accepts as parameters two DoF Handlers, the first of which refers to the coarse grid and the second of which is the fine grid. On both, a finite element is represented by the DoF handler objects, which will usually have several vector components, which may belong to different base elements. The second and fourth parameter of this function therefore state which vector component on the coarse grid shall be used to restrict the stated component on the fine grid. The finite element used for the respective components on the two grids needs to be the same. An example may clarify this: consider an optimization problem with controls $q$ discretized on a coarse mesh and a state variable $u$ (and corresponding Lagrange multiplier $\lambda$) discretized on the fine mesh. These are discretized using piecewise constant discontinuous, continuous linear, and continuous linear elements, respectively. Only the parameter $q$ is represented on the coarse grid, thus the DoFHandler object on the coarse grid represents only one variable, discretized using piecewise constant discontinuous elements. Then, the parameter denoting the vector component on the coarse grid would be zero (the only possible choice, since the variable on the coarse grid is scalar). If the ordering of variables in the fine mesh FESystem is $u, q, \lambda$, then the fourth argument of the function corresponding to the vector component would be one (corresponding to the variable $q$; zero would be $u$, two would be $\lambda$).

    The function also requires an object of type IntergridMap representing how to get from the coarse mesh cells to the corresponding cells on the fine mesh. This could in principle be generated by the function itself from the two DoFHandler objects, but since it is probably available anyway in programs that use different meshes, the function simply takes it as an argument.

    The computed constraints are entered into a variable of type AffineConstraints; previous contents are not deleted.
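A hedged sketch of the call sequence for the optimization example above (`coarse_dof` holds the piecewise constant control $q$, `fine_dof` the FESystem ordered as $u, q, \lambda$; names are illustrative):

InterGridMap<DoFHandler<dim>> coarse_to_fine_map;
coarse_to_fine_map.make_mapping(coarse_dof, fine_dof);

AffineConstraints<double> constraints;
DoFTools::compute_intergrid_constraints(coarse_dof,
                                        /*coarse_component=*/0, // q is the only coarse variable
                                        fine_dof,
                                        /*fine_component=*/1,   // q within (u, q, lambda)
                                        coarse_to_fine_map,
                                        constraints);
constraints.close();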

/usr/share/doc/packages/dealii/doxygen/deal.II/group__feaccess.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__feaccess.html 2024-11-15 06:44:26.199642726 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__feaccess.html 2024-11-15 06:44:26.199642726 +0000 @@ -241,7 +241,7 @@ update_quadrature_points

    Transformed quadrature points.

Compute the quadrature point locations in real cell coordinates.

FEValues objects take the quadrature point locations on the reference cell as an argument of the constructor (via the Quadrature object). For most finite elements, knowing the location of quadrature points on the reference cell is all that is necessary to evaluate shape functions, evaluate the mapping, and other things. On the other hand, if you want to evaluate a right hand side function $f(\mathbf x_q)$ at quadrature point locations $\mathbf x_q$ on the real cell, you need to pass this flag to the FEValues constructor to make sure you can later access them.

    There are contexts other than FEValues (and related classes) that take update flags. An example is the DataPostprocessor class (and derived classes). In these cases, the update_quadrature_points flag is generally understood to update the location of "evaluation points", i.e., the physical locations of the points at which the solution is evaluated. As a consequence, the flag is misnamed in these contexts: No quadrature (i.e., computation of integrals) is involved, and consequently what is being updated is, in the context of DataPostprocessor, the member variable DataPostprocessorInputs::CommonInputs::evaluation_points.
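A minimal sketch (assuming a finite element `fe`, a DoFHandler `dof_handler`, and a scalar Function<dim> `rhs_function`); without the update_quadrature_points flag, the call to quadrature_point() below would trigger an assertion:

QGauss<dim>   quadrature(fe.degree + 1);
FEValues<dim> fe_values(fe,
                        quadrature,
                        update_values | update_JxW_values |
                          update_quadrature_points);

for (const auto &cell : dof_handler.active_cell_iterators())
  {
    fe_values.reinit(cell);
    for (const unsigned int q : fe_values.quadrature_point_indices())
      {
        // Real-space location x_q, available because of the flag above:
        const Point<dim> x_q = fe_values.quadrature_point(q);
        const double     f_q = rhs_function.value(x_q);
        // ... use f_q in the local right hand side ...
      }
  }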

/usr/share/doc/packages/dealii/doxygen/deal.II/group__hpcollection.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__hpcollection.html 2024-11-15 06:44:26.215642869 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__hpcollection.html 2024-11-15 06:44:26.215642869 +0000 @@ -130,7 +130,7 @@ fe_collection.push_back (FE_Q<dim>(degree));

This way, one can add elements of polynomial degree 1 through 4 to the collection. It is not necessary to retain the added object: the collection makes a copy of it; it does not merely store a pointer to the given finite element object. This same observation also holds for the other collection classes.

    It is customary that within an hp-finite element program, one keeps collections of finite elements and quadrature formulas with the same number of elements, each element of the one collection matching the element in the other. This is not necessary, but it often makes coding a lot simpler. If a collection of mappings is used, the same holds for hp::MappingCollection objects as well.

Whenever p-adaptivity is considered in an hp-finite element program, a hierarchy of finite elements needs to be established to determine succeeding finite elements for refinement and preceding ones for coarsening. Typically, this hierarchy considers how finite element spaces are nested: for example, a $Q_1$ element describes a sub-space of a $Q_2$ element, and so doing $p$ refinement usually means using a larger (more accurate) finite element space. In other words, the hierarchy of finite elements is built by considering whether some elements of the collection are sub- or super-spaces of others.

    By default, we assume that finite elements are stored in an ascending order based on their polynomial degree. If the order of elements differs, a corresponding hierarchy needs to be supplied to the collection via the hp::FECollection::set_hierarchy() member function.
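For example, matching collections for degrees 1 through 4 could be built as follows (a minimal sketch):

hp::FECollection<dim> fe_collection;
hp::QCollection<dim>  quadrature_collection;
for (unsigned int degree = 1; degree <= 4; ++degree)
  {
    fe_collection.push_back(FE_Q<dim>(degree));
    quadrature_collection.push_back(QGauss<dim>(degree + 1));
  }
// The default hierarchy (ascending polynomial degree) matches this order,
// so no call to hp::FECollection::set_hierarchy() is needed here.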

    /usr/share/doc/packages/dealii/doxygen/deal.II/group__mapping.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__mapping.html 2024-11-15 06:44:26.231643011 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__mapping.html 2024-11-15 06:44:26.231643011 +0000 @@ -179,7 +179,7 @@
A class that implements a polynomial mapping $Q_p$ of degree $p$ on all cells. This class is completely equivalent to the MappingQ class and exists only for backward compatibility.

    Definition at line 692 of file mapping_q.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/group__reordering.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__reordering.html 2024-11-15 06:44:26.255643226 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__reordering.html 2024-11-15 06:44:26.255643226 +0000 @@ -260,7 +260,7 @@

From the examples above, it is obvious that if we encounter a cell that cannot be added to the cells which have already been entered, we cannot usually point to a cell that is the culprit and that must be entered in a different orientation. Furthermore, even if we knew which cell, there might be a large number of cells that would then cease to fit into the grid and for which we would have to find a different orientation as well (in the second example above, if we rotated cell 1, then we would have to rotate the cells 1 through N-1 as well).

A brute force approach to this problem is the following: if cell N can't be added, then try to rotate cell N-1. If we can't rotate cell N-1 any more, then try to rotate cell N-2 and try to add cell N with all orientations of cell N-1. And so on. Algorithmically, we can visualize this by a tree structure, where node N has as many children as there are possible orientations of node N+1 (in two space dimensions, there are four orientations in which each cell can be constructed from its four vertices; for example, if the vertex indices are {0 1 3 2}, then the four possibilities would be {0, 1, 3, 2}, {1, 3, 2, 0}, {3, 2, 0, 1}, and {2, 0, 1, 3}). When adding one cell after the other, we traverse this tree in a depth-first (pre-order) fashion. When we encounter that one path from the root (cell 0) to a leaf (the last cell) is not allowed (i.e. that the orientations of the cells which are encoded in the path through the tree do not lead to a valid triangulation), we have to track back and try another path through the tree.

    In practice, of course, we do not follow each path to a final node and then find out whether a path leads to a valid triangulation, but rather use an inductive argument: if for all previously added cells the triangulation is a valid one, then we can find out whether a path through the tree can yield a valid triangulation by checking whether entering the present cell would introduce any faces that have a nonunique direction; if that is so, then we can stop following all paths below this point and track back immediately.
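Purely as an illustration of this depth-first search with pruning and backtracking (a sketch, not the actual deal.II implementation; `faces_consistent_up_to` is a hypothetical predicate checking that no face has been given two different directions):

// Try to orient cells n, n+1, ... consistently, given fixed orientations
// for cells 0..n-1. Returns true if a consistent completion exists.
bool orient_from(std::vector<int> &orientation, const std::size_t n)
{
  if (n == orientation.size())
    return true; // every cell placed without a direction conflict

  for (int o = 0; o < 4; ++o) // four orientations per cell in 2d
    {
      orientation[n] = o;
      // Prune: only descend if cell n introduces no conflicting face.
      if (faces_consistent_up_to(orientation, n) &&
          orient_from(orientation, n + 1))
        return true;
    }
  return false; // track back: the caller must re-orient cell n-1
}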

Nevertheless, it is already obvious that the tree has $4^N$ leaves in two space dimensions, since each of the $N$ cells can be added in four orientations. Most of these nodes can be discarded rapidly, since firstly the orientation of the first cell is irrelevant, and secondly if we add one cell that has a neighbor that has already been added, then there are already only two possible orientations left, so the total number of checks we have to make until we find a valid way is significantly smaller than $4^N$. However, the algorithm is still exponential in time and linear in memory (we only have to store the information for the present path in form of a stack of orientations of cells that have already been added).

    In fact, the two examples above show that the exponential estimate is not a pessimistic one: we indeed have to track back to one of the very first cells there to find a way to add all cells in a consistent fashion.

    This discouraging situation is greatly improved by the fact that we have an alternative algorithm for 2d that is always linear in runtime (discovered and implemented by Michael Anderson of TICAM, University of Texas, in 2003), and that for 3d we can find an algorithm that in practice is usually only roughly linear in time and memory. We will describe these algorithms in the following. A full description and theoretical analysis is given in [AABB17] .

    The 2d linear complexity algorithm

    /usr/share/doc/packages/dealii/doxygen/deal.II/group__threads.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__threads.html 2024-11-15 06:44:26.291643548 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__threads.html 2024-11-15 06:44:26.291643548 +0000 @@ -312,7 +312,7 @@
In this example, we used a lambda expression to construct, on the fly, a function object that takes two arguments and returns the sum of the two. This is exactly what we need when we want to add the individual elements of vectors $x$ and $y$ and write the sum of the two into the elements of $z$. The function object that we get here is completely known to the compiler and when it expands the loop that results from parallel::transform, it will be as if we had written the loop in its obvious form:

InputIterator1 in_1 = x.begin();
InputIterator2 in_2 = y.begin();
OutputIterator out = z.begin();
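For reference, the parallel call itself is a one-liner (a minimal sketch, assuming vectors x, y, z of equal size and a grain size of 1000 elements):

parallel::transform(x.begin(), x.end(),
                    y.begin(),
                    z.begin(),
                    [](const double a, const double b) { return a + b; },
                    1000);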
    @@ -405,7 +405,7 @@
    }

    Here, we call the vmult_on_subrange function on sub-ranges of at least 200 elements each, so that the initial setup cost can amortize.

A related operation is when the loops over elements each produce a result that must then be accumulated (reduction operations other than the addition of numbers would work as well). An example is to form the matrix norm $x^T M x$ (it really is only a norm if $M$ is positive definite, but let's assume for a moment that it is). A sequential implementation would look like this for sparse matrices:

double SparseMatrix::mat_norm (const Vector &x) const
{
const double *val_ptr = &values[0];
const unsigned int *colnum_ptr = &colnums[0];
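The parallel version of this reduction can then be written with parallel::accumulate_from_subranges, as a hedged sketch (assuming a helper mat_norm_sqr_on_subrange(begin_row, end_row, x) that returns the partial sum of $x_i (Mx)_i$ over a range of rows; the helper is hypothetical here):

double SparseMatrix::mat_norm (const Vector &x) const
{
  return std::sqrt(
    parallel::accumulate_from_subranges<double>(
      [this, &x](const unsigned int begin_row, const unsigned int end_row) {
        return mat_norm_sqr_on_subrange(begin_row, end_row, x);
      },
      0,
      n_rows(),
      /*grainsize=*/200));
}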
    @@ -608,7 +608,7 @@

  • The last issue that is worth addressing is that the way we wrote the MyClass::assemble_on_one_cell function above, we create and destroy an FEValues object every time the function is called, i.e. once for each cell in the triangulation. That's an immensely expensive operation because the FEValues class tries to do a lot of work in its constructor in an attempt to reduce the number of operations we have to do on each cell (i.e. it increases the constant in the ${\cal O}(1)$ effort to initialize such an object in order to reduce the constant in the ${\cal O}(N)$ operations to call FEValues::reinit on the $N$ cells of a triangulation). Creating and destroying an FEValues object on each cell invalidates this effort.

    The way to avoid this is to put the FEValues object into a second structure that will hold scratch data, and initialize it in the constructor:

    struct PerTaskData {
    FullMatrix<double> cell_matrix;
    Vector<double> cell_rhs;
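A minimal sketch of such a scratch structure (the copy constructor matters: WorkStream copies the scratch object once per task, and each copy must build its own FEValues):

struct ScratchData
{
  ScratchData(const FiniteElement<dim> &fe, const Quadrature<dim> &quadrature)
    : fe_values(fe,
                quadrature,
                update_values | update_gradients | update_JxW_values)
  {}

  // FEValues is not copyable, so rebuild it from the original's state:
  ScratchData(const ScratchData &scratch)
    : fe_values(scratch.fe_values.get_fe(),
                scratch.fe_values.get_quadrature(),
                scratch.fe_values.get_update_flags())
  {}

  FEValues<dim> fe_values;
};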
/usr/share/doc/packages/dealii/doxygen/deal.II/group__vector__valued.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/group__vector__valued.html 2024-11-15 06:44:26.323643833 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/group__vector__valued.html 2024-11-15 06:44:26.327643869 +0000 @@ -294,8 +294,8 @@

indeed has four components. We note that we could change the ordering of the solution components $\textbf u$ and $p$ inside $U$ if we also change columns of the matrix operator.

Next, we need to think about test functions $V$. We want to multiply both sides of the equation with them, then integrate over $\Omega$. The result should be a scalar equality. We can achieve this by choosing $V$ also vector valued as

    \begin{eqnarray*}
   V =
   \left(
@@ -463,7 +463,7 @@

  • These views can then be asked for information about these individual components. For example, when you write fe_values[pressure].value(i,q) you get the value of the pressure component of the $i$th shape function $V_i$ at the $q$th quadrature point. Because the extractor pressure represents a scalar component, the result of the operator fe_values[pressure].value(i,q) is a scalar number. On the other hand, the call fe_values[velocities].value(i,q) would produce the value of a whole set of dim components, which would be of type Tensor<1,dim>.
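A minimal sketch (a Stokes-like element: dim velocity components first, then one pressure component; i and q denote a shape function and a quadrature point index):

const FEValuesExtractors::Vector velocities(0);
const FEValuesExtractors::Scalar pressure(dim);

// Scalar view: one number per shape function and quadrature point.
const double p_value = fe_values[pressure].value(i, q);

// Vector view: a rank-1 tensor with dim components.
const Tensor<1, dim> u_value = fe_values[velocities].value(i, q);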

  • @@ -607,10 +607,10 @@
    }
  • So if, again, this is not the code we use in step-8, what do we do there? The answer rests on the finite element we use. In step-8, we use the following element:

    FESystem<dim> finite_element (FE_Q<dim>(1), dim);
In other words, the finite element we use consists of dim copies of the same scalar element. This is what we call a primitive element: an element that may be vector-valued but where each shape function has exactly one non-zero component. In other words: if the $x$-component of a displacement shape function is nonzero, then the $y$- and $z$-components must be zero and similarly for the other components. What this means is that also derived quantities based on shape functions inherit this sparsity property. For example: the divergence $\mathrm{div}\ \Phi(x,y,z)=\partial_x\varphi_x(x,y,z) + \partial_y\varphi_y(x,y,z) + \partial_z\varphi_z(x,y,z)$ of a vector-valued shape function $\Phi(x,y,z)=(\varphi_x(x,y,z), \varphi_y(x,y,z), \varphi_z(x,y,z))^T$ is, in the present case, either $\mathrm{div}\ \Phi(x,y,z)=\partial_x\varphi_x(x,y,z)$, $\mathrm{div}\ \Phi(x,y,z)=\partial_y\varphi_y(x,y,z)$, or $\mathrm{div}\ \Phi(x,y,z)=\partial_z\varphi_z(x,y,z)$, because exactly one of the $\varphi_\ast$ is nonzero. Knowing this means that we can save a number of computations that, if we were to do them, would only yield zeros to add up.

    In a similar vein, if only one component of a shape function is nonzero, then only one row of its gradient $\nabla\Phi$ is nonzero. What this means for terms like $(\mu \nabla\Phi_i,\nabla\Phi_j)$, where the scalar product between two tensors is defined as $(\tau, \gamma)_\Omega=\int_\Omega \sum_{i,j=1}^d \tau_{ij} \gamma_{ij}$, is that the term is only nonzero if both tensors have their nonzero entries in the same row, which means that the two shape functions have to have their single nonzero component in the same location.

If we use this sort of knowledge, then we can in a first step avoid computing gradient tensors if we can determine up front that their scalar product will be nonzero, in a second step avoid building the entire tensors and only get its nonzero components, and in a final step simplify the scalar product by only considering that index $i$ for the one nonzero row, rather than multiplying and adding up zeros.

    The vehicle for all this is the ability to determine which vector component is going to be nonzero. This information is provided by the FiniteElement::system_to_component_index function. What can be done with it, using the example above, is explained in detail in step-8.
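A minimal sketch of this optimization for the elasticity-like term above (`mu`, `fe`, `fe_values`, and `cell_matrix` are assumed from the surrounding assembly loop):

const unsigned int comp_i = fe.system_to_component_index(i).first;
const unsigned int comp_j = fe.system_to_component_index(j).first;

// Only shape functions whose single nonzero component coincides can
// produce a nonzero entry, so skip all other pairs outright.
if (comp_i == comp_j)
  cell_matrix(i, j) += mu *
                       (fe_values.shape_grad(i, q) *
                        fe_values.shape_grad(j, q)) *
                       fe_values.JxW(q);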

    Block solvers

    Using techniques as shown above, it isn't particularly complicated to assemble the linear system, i.e. matrix and right hand side, for a vector-valued problem. However, then it also has to be solved. This is more complicated. Naively, one could just consider the matrix as a whole. For most problems, this matrix is not going to be definite (except for special cases like the elasticity equations covered in step-8 and step-17). It will, often, also not be symmetric. This rather general class of matrices presents problems for iterative solvers: the lack of structural properties prevents the use of most efficient methods and preconditioners. While it can be done, the solution process will therefore most often be slower than necessary.

@@ -628,7 +628,7 @@

where $M$ represents the mass matrix that results from discretizing the identity operator $\mathbf 1$ and $B$ the equivalent of the gradient operator.

By default, this is not what happens, however. Rather, deal.II assigns numbers to degrees of freedom in a rather random manner. Consequently, if you form a vector out of the values of the degrees of freedom, it will not be neatly ordered like

    \begin{eqnarray*}
   \left(
@@ -668,8 +668,8 @@
   MU = F-BP.
 \end{eqnarray*}

This has the advantage that the matrices $B^TM^{-1}B$ and $M$ that we have to solve with are both symmetric and positive definite, as opposed to the large whole matrix we had before.

How a solver like this is implemented is explained in more detail in step-20, step-31, and a few other tutorial programs. What we would like to point out here is that we now need a way to extract certain parts of a matrix or vector: if we are to multiply, say, the $U$ part of the solution vector by the $M$ part of the global matrix, then we need to have a way to access these parts of the whole.

This is where the BlockVector, BlockSparseMatrix, and similar classes come in. For all practical purposes, they can be used as regular vectors or sparse matrices, i.e. they offer element access, provide the usual vector operations and implement, for example, matrix-vector multiplications. In other words, assembling matrices and right hand sides works in exactly the same way as for the non-block versions. That said, internally they store the elements of vectors and matrices in "blocks"; for example, instead of using one large array, the BlockVector class stores it as a set of arrays each of which we call a block. The advantage is that, while the whole thing can be used as a vector, one can also access an individual block which then, again, is a vector with all the vector operations.

    To show how to do this, let us consider the second equation $MU=F-BP$ to be solved above. This can be achieved using the following sequence similar to what we have in step-20:

    Vector<double> tmp (solution.block(0).size());
    system_matrix.block(0,1).vmult (tmp, solution.block(1));
@@ -689,7 +689,7 @@

What's happening here is that we allocate a temporary vector with as many elements as the first block of the solution vector, i.e. the velocity component $U$, has. We then set this temporary vector equal to the $(0,1)$ block of the matrix, i.e. $B$, times component 1 of the solution which is the previously computed pressure $P$. The result is multiplied by $-1$, and component 0 of the right hand side, $F$ is added to it. The temporary vector now contains $F-BP$. The rest of the code snippet simply solves a linear system with $F-BP$ as right hand side and the $(0,0)$ block of the global matrix, i.e. $M$. Using block vectors and matrices in this way therefore allows us to quite easily write rather complicated solvers making use of the block structure of a linear system.
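Put together, the full sequence just described reads as follows (a hedged sketch following the pattern of step-20, with `system_rhs` the block right hand side):

Vector<double> tmp (solution.block(0).size());
system_matrix.block(0,1).vmult (tmp, solution.block(1)); // tmp = B P
tmp *= -1;                                               // tmp = -B P
tmp += system_rhs.block(0);                              // tmp = F - B P

SolverControl            solver_control (1000, 1e-12);
SolverCG<Vector<double>> cg (solver_control);
cg.solve (system_matrix.block(0,0), solution.block(0), tmp,
          PreconditionIdentity());                       // solve M U = F - B P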

    Extracting data from solutions

    Once one has computed a solution, it is often necessary to evaluate it at quadrature points, for example to evaluate nonlinear residuals for the next Newton iteration, to evaluate the finite element residual for error estimators, or to compute the right hand side for the next time step in a time dependent problem.

The way this is done is to again use an FEValues object to evaluate the shape functions at quadrature points, and with those also the values of a finite element function. For the example of the mixed Laplace problem above, consider the following code after solving:

    std::vector<Vector<double> > local_solution_values (n_q_points,
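A hedged completion of this fragment (mixed Laplace as in step-20, so the solution has dim+1 components per evaluation point):

std::vector<Vector<double>> local_solution_values (n_q_points,
                                                   Vector<double>(dim+1));
fe_values.get_function_values (solution, local_solution_values);
// local_solution_values[q](c) is now component c of u_h at quadrature point q.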
    /usr/share/doc/packages/dealii/doxygen/deal.II/index.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/index.html 2024-11-15 06:44:26.343644012 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/index.html 2024-11-15 06:44:26.343644012 +0000 @@ -132,7 +132,7 @@
  • DoFHandler: DoFHandler objects are the confluence of triangulations and finite elements: the finite element class describes how many degrees of freedom it needs per vertex, line, or cell, and the DoFHandler class allocates this space so that each vertex, line, or cell of the triangulation has the correct number of them. It also gives them a global numbering.

A different viewpoint is this: While the mesh and finite element describe abstract properties of the finite dimensional space $V_h$ in which we seek the discrete solution, the DoFHandler classes enumerate a concrete basis of this space so that we can represent the discrete solution as $u_h(\mathbf x)= \sum_j U_j \varphi_j(\mathbf x)$ by an ordered set of coefficients $U_j$.

Just as with triangulation objects, most operations on DoFHandlers are done by looping over all cells and doing something on each or a subset of them. The interfaces of the two classes are therefore rather similar: they allow one to get iterators to the first and last cell (or face, or line, etc.) and offer information through these iterators. The information available through these iterators is the geometric and topological information already available from the triangulation iterators (they are in fact derived classes) as well as things like the global numbers of the degrees of freedom on the present cell. One can also ask an iterator to extract the values corresponding to the degrees of freedom on the present cell from a data vector that stores values for all degrees of freedom associated with a triangulation.

    It is worth noting that, just as triangulations, DoFHandler classes do not know anything about the mapping from the unit cell to its individual cells. It is also ignorant of the shape functions that correspond to the degrees of freedom it manages: all it knows is that there are, for example, 2 degrees of freedom for each vertex and 4 per cell interior. Nothing about their specifics is relevant to the DoFHandler class with the exception of the fact that they exist.

    The DoFHandler class and its associates are described in the Degrees of Freedom topic. In addition, there are specialized versions that can handle multilevel and hp-discretizations. These are described in the Multilevel support and hp-finite element support topics. Finite element methods frequently imply constraints on degrees of freedom, such as for hanging nodes or nodes at which boundary conditions apply; dealing with such constraints is described in the Constraints on degrees of freedom topic.

    /usr/share/doc/packages/dealii/doxygen/deal.II/index__set_8h.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/index__set_8h.html 2024-11-15 06:44:26.359644155 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/index__set_8h.html 2024-11-15 06:44:26.359644155 +0000 @@ -155,7 +155,7 @@
  • Create and return an index set of size $N$ that contains every single index within this range. In essence, this function returns an index set created by

    IndexSet is (N);
    is.add_range(0, N);

    This function exists so that one can create and initialize index sets that are complete in one step, or so one can write code like

if (my_index_set == complete_index_set(my_index_set.size()))
/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html 2024-11-15 06:44:26.375644298 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Coarsening.html 2024-11-15 06:44:26.375644298 +0000 @@ -145,11 +145,11 @@ const std::vector< value_type > & children_values

    Check if data on all children match, and return value of the first child.

\[
   d_{K_p} = d_{K_c}
   \qquad
   \forall K_c \text{ children of } K_p
\]

@@ -173,13 +173,13 @@ const std::vector< value_type > & children_values

    Return sum of data on all children.

\[
   d_{K_p} = \sum d_{K_c}
   \qquad
   \forall K_c \text{ children of } K_p
\]

This strategy preserves the $l_1$-norm of the corresponding global data vector before and after adaptation.

@@ -200,15 +200,15 @@ const std::vector< value_type > & children_values

Return $ l_2 $-norm of data on all children.

\[
   d_{K_p}^2 = \sum d_{K_c}^2
   \qquad
   \forall K_c \text{ children of } K_p
\]

This strategy preserves the $l_2$-norm of the corresponding global data vector before and after adaptation.

@@ -231,11 +231,11 @@ const std::vector< value_type > & children_values

    Return mean value of data on all children.

\[
   d_{K_p} = \sum d_{K_c} / n_\text{children}
   \qquad
   \forall K_c \text{ children of } K_p
\]

@@ -259,11 +259,11 @@ const std::vector< value_type > & children_values

    Return maximum value of data on all children.

\[
   d_{K_p} = \max \left( d_{K_c} \right)
   \qquad
   \forall K_c \text{ children of } K_p
\]

/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html 2024-11-15 06:44:26.391644441 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceAdaptationStrategies_1_1Refinement.html 2024-11-15 06:44:26.391644441 +0000 @@ -141,11 +141,11 @@ const value_type parent_value

    Return a vector containing copies of data of the parent cell for each child.

\[
   d_{K_c} = d_{K_p}
   \qquad
   \forall K_c \text{ children of } K_p
\]

@@ -169,13 +169,13 @@ const value_type parent_value

    Return a vector which contains data of the parent cell being equally divided among all children.

\[
   d_{K_c} = d_{K_p} / n_\text{children}
   \qquad
   \forall K_c \text{ children of } K_p
\]

This strategy preserves the $l_1$-norm of the corresponding global data Vector before and after adaptation.

@@ -198,13 +198,13 @@ const value_type parent_value

    Return a vector which contains squared data of the parent cell being equally divided among the squares of all children.

\[
   d_{K_c}^2 = d_{K_p}^2 / n_\text{children}
   \qquad
   \forall K_c \text{ children of } K_p
\]

This strategy preserves the $l_2$-norm of the corresponding global data Vector before and after adaptation.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataComponentInterpretation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataComponentInterpretation.html 2024-11-15 06:44:26.403644548 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataComponentInterpretation.html 2024-11-15 06:44:26.403644548 +0000 @@ -128,7 +128,7 @@
The members of this enum are used to describe the logical interpretation of what the various components of a vector-valued data set mean. For example, if one has a finite element for the Stokes equations in 2d, representing components $(u,v,p)$, one would like to indicate that the first two, $u$ and $v$, represent a logical vector so that later on when we generate graphical output we can hand them off to a visualization program that will automatically know to render them as a vector field, rather than as two separate and independent scalar fields.

    By passing a set of enums of the current kind to the DataOut_DoFData::add_data_vector functions, this can be achieved.

    See the step-22 tutorial program for an example on how this information can be used in practice.
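A minimal sketch for the 2d Stokes case above (names are illustrative; the pattern follows step-22):

std::vector<DataComponentInterpretation::DataComponentInterpretation>
  interpretation(dim, DataComponentInterpretation::component_is_part_of_vector);
interpretation.push_back(DataComponentInterpretation::component_is_scalar);

std::vector<std::string> names(dim, "velocity");
names.push_back("pressure");

DataOut<dim> data_out;
data_out.attach_dof_handler(dof_handler);
data_out.add_data_vector(solution,
                         names,
                         DataOut<dim>::type_dof_data,
                         interpretation);
data_out.build_patches();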

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataOutBase.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataOutBase.html 2024-11-15 06:44:26.451644977 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDataOutBase.html 2024-11-15 06:44:26.451644977 +0000 @@ -551,7 +551,7 @@

While this discussion applies to two spatial dimensions, it is more complicated in 3d. The reason is that we could still use patches, but it is difficult when trying to visualize them, since if we use a cut through the data (by, for example, using x- and z-coordinates, a fixed y-value, and plotting function values in z-direction), then the patched data is not a patch in the sense GNUPLOT wants it any more. Therefore, we use another approach, namely writing the data on the 3d grid as a sequence of lines, i.e. two points each associated with one or more data sets. There are therefore 12 lines for each subcell of a patch.

    Given the lines as described above, a cut through this data in Gnuplot can then be achieved like this:

set data style lines
splot [:][:][0:] "T" using 1:2:($3==.5 ? $4 : -1)
This command plots data in $x$- and $y$-direction unbounded, but in $z$-direction only those data points which are above the $x$-$y$-plane (we assume here a positive solution; if it has negative values, you might want to decrease the lower bound). Furthermore, it only takes the data points with z-values ($3) equal to 0.5, i.e. a cut through the domain at z=0.5. For the data points on this plane, the data values of the first data set ($4) are raised in z-direction above the x-y-plane; all other points are assigned the value -1 instead of the value of the data vector and are not plotted due to the lower bound in z plotting direction, given in the third pair of brackets.

    More complex cuts are possible, including nonlinear ones. Note, however, that only those points which are actually on the cut-surface are plotted.

    Definition at line 3556 of file data_out_base.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDerivativeApproximation.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDerivativeApproximation.html 2024-11-15 06:44:26.471645156 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDerivativeApproximation.html 2024-11-15 06:44:26.471645156 +0000 @@ -133,17 +133,17 @@

    Detailed Description

    This namespace provides functions that compute a cell-wise approximation of the norm of a derivative of a finite element field by taking difference quotients between neighboring cells. This is a rather simple but efficient way to obtain an error indicator, since it can be computed with relatively little numerical effort and yet gives a reasonable approximation.

    The way the difference quotients are computed on cell $K$ is the following (here described for the approximation of the gradient of a finite element field, but see below for higher derivatives): let $K'$ be a neighboring cell, and let $y_{K'}=x_{K'}-x_K$ be the distance vector between the centers of the two cells, then $\frac{u_h(x_{K'}) - u_h(x_K)}{\|y_{K'}\|}$ is an approximation of the directional derivative $\nabla u(x_K) \cdot \frac{y_{K'}}{\|y_{K'}\|}.$ By multiplying both terms by $\frac{y_{K'}}{\|y_{K'}\|}$ from the left and summing over all neighbors $K'$, we obtain $\sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|} \frac{y_{K'}^T}{\|y_{K'}\|} \right) \nabla u(x_K) \approx \sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|} \frac{u_h(x_{K'}) - u_h(x_K)}{\|y_{K'}\|} \right).$

    Thus, if the matrix $Y = \sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|} \frac{y_{K'}^T}{\|y_{K'}\|} \right)$ is regular (which is the case when the vectors $y_{K'}$ to all neighbors span the whole space), we can obtain an approximation to the true gradient by $\nabla u(x_K) \approx Y^{-1} \sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|} \frac{u_h(x_{K'}) - u_h(x_K)}{\|y_{K'}\|} \right).$ This is a quantity that is easily computed. The value returned for each cell when calling the approximate_gradient function of this class is the $l_2$ norm of this approximation to the gradient. To make this a useful quantity, you may want to scale each element by the correct power of the respective cell size.

    The computation of this quantity must fail if a cell has only neighbors for which the direction vectors $y_K$ do not span the whole space, since then the matrix $Y$ is no longer invertible. If this happens, you will get an error similar to this one:

    --------------------------------------------------------
    An error occurred in line <749>
    of file <source/numerics/derivative_approximation.cc> in function
    void DerivativeApproximation::approximate(...)
    DEAL_II_HOST constexpr Number determinant(const SymmetricTensor< 2, dim, Number > &)

    As can easily be verified, this can only happen on very coarse grids, when some cells and all their neighbors have not been refined even once. You should therefore only call the functions of this class if all cells are at least once refined. In practice this is not much of a restriction.
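    As a concrete illustration, the per-cell gradient indicator can be computed like this (a minimal sketch, assuming a DoFHandler dof_handler with attached solution vector solution and its enclosing triangulation; this mirrors what the step-9 tutorial does):

        Vector<float> gradient_indicator(triangulation.n_active_cells());
        DerivativeApproximation::approximate_gradient(dof_handler,
                                                      solution,
                                                      gradient_indicator);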

    Approximation of higher derivatives

    Approximations to higher derivatives can be computed along the same lines as above. For example, the tensor of second derivatives is approximated by the formula $\nabla^2 u(x_K) \approx Y^{-1} \sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|} \otimes \frac{\nabla u_h(x_{K'}) - \nabla u_h(x_K)}{\|y_{K'}\|} \right),$ where $\otimes$ denotes the outer product of two vectors. Note that unlike the true tensor of second derivatives, its approximation is not necessarily symmetric. This is due to the fact that in the derivation, it is not clear whether we shall consider as projected second derivative the term $\nabla^2 u y_{KK'}$ or $y_{KK'}^T \nabla^2 u$. Depending on which choice we take, we obtain one approximation of the tensor of second derivatives or its transpose. To avoid this ambiguity, as the result we take the symmetrized form, which is the mean value of the approximation and its transpose.

    The returned value on each cell is the spectral norm of the approximated tensor of second derivatives, i.e. the largest eigenvalue by absolute value. This equals the largest curvature of the finite element field at each cell, and the spectral norm is the matrix norm associated to the $l_2$ vector norm.

    Even higher derivatives than the second can be obtained along the same lines as exposed above.

    Refinement indicators based on the derivatives

    If you would like to base a refinement criterion upon these approximations of the derivatives, you will have to scale the results of this class by an appropriate power of the mesh width. For example, since $\|u-u_h\|^2_{L_2} \le C h^2 \|\nabla u\|^2_{L_2}$, it might be the right thing to scale the indicators as $\eta_K = h \|\nabla u\|_K$, i.e. $\eta_K = h^{1+d/2} \|\nabla u\|_{\infty;K}$, i.e. the right power is $1+d/2$.

    Likewise, for the second derivative, one should choose a power of the mesh size $h$ one higher than for the gradient.
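    In code, the gradient scaling just discussed could look like this (a sketch continuing the hypothetical gradient_indicator example from above):

        // Scale each cell's indicator by h^(1+d/2), as argued above.
        for (const auto &cell : dof_handler.active_cell_iterators())
          gradient_indicator(cell->active_cell_index()) *=
            std::pow(cell->diameter(), 1. + dim / 2.);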

    Implementation

    The formulae for the computation of approximations to the gradient and to the tensor of second derivatives shown above are very much alike. The basic difference is that in one case the finite difference quotient is a scalar, while in the other case it is a vector. For higher derivatives, this would be a tensor of even higher rank. We then have to form the outer product of this difference quotient with the distance vector $y_{KK'}$, symmetrize it, contract it with the matrix $Y^{-1}$ and compute its norm. To make the implementation simpler and to allow for code reuse, all these operations that are dependent on the actual order of the derivatives to be approximated, as well as the computation of the quantities entering the difference quotient, have been separated into auxiliary nested classes (named Gradient and SecondDerivative) and the main algorithm is simply passed one or the other data types and asks them to perform the order dependent operations. The main framework that is independent of this, such as finding all active neighbors, or setting up the matrix $Y$ is done in the main function approximate.

    Due to this way of operation, the class may be easily extended for higher order derivatives than are presently implemented. Basically, only an additional class along the lines of the derivative descriptor classes Gradient and SecondDerivative has to be implemented, with the respective alias and functions replaced by the appropriate analogues for the derivative that is to be approximated.

    Function Documentation

    const unsigned int component = 0

    This function is the analogue to the one above, computing finite difference approximations of the tensor of second derivatives. Pass it the DoF handler object that describes the finite element field, a nodal value vector, and receive the cell-wise spectral norm of the approximated tensor of second derivatives. The spectral norm is the matrix norm associated to the $l_2$ vector norm.

    The last parameter denotes the solution component for which the derivative is to be computed. It defaults to the first component. For scalar elements, this is the only valid choice; for vector-valued ones, any component between zero and the number of vector components can be given here.

    In a parallel computation the solution vector needs to contain the locally relevant unknowns.
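    A call mirroring the gradient case might look like this (a sketch under the same assumptions as the gradient example above; the trailing argument selects the solution component):

        Vector<float> second_derivative_norm(triangulation.n_active_cells());
        DerivativeApproximation::approximate_second_derivative(
          dof_handler,
          solution,
          second_derivative_norm,
          /*component=*/0);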

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html 2024-11-15 06:44:26.583646156 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDifferentiation_1_1SD.html 2024-11-15 06:44:26.583646156 +0000 @@ -656,7 +656,7 @@
    Return a symbolic number that represents the Euler constant $e \approx 2.71828$ raised to the given exponent.

    Mimics the function std::exp(exponent) using the standard math library.

    Definition at line 59 of file symengine_math.cc.


    Return an Expression representing a scalar symbolic variable with the identifier specified by symbol.

    For example, if the symbol is the string "x" then the scalar symbolic variable that is returned represents the scalar $x$.
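    For instance (a minimal sketch; the expression f is merely an illustrative choice):

        using namespace dealii::Differentiation;

        // A scalar symbolic variable named "x", and an expression built from it.
        const SD::Expression x = SD::make_symbol("x");
        const SD::Expression f = 2 * x * x + 3;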

    Parameters

    Return a substitution map that has any explicit interdependencies between the entries of the input substitution_map resolved.

    The force_cyclic_dependency_resolution flag exists to ensure, if desired, that no cyclic dependencies can exist in the returned map. If a cyclic dependency exists in the input substitution map, substitution_map, then with this flag set to true the dependency cycle is broken by a dictionary-ordered substitution. For example, if the substitution map contains two entries map["a"] -> "b" and map["b"] -> "a", then the result of calling this function would be a map with the elements map["a"] -> "a" and map["b"] -> "a".

    If one symbol is an explicit function of another, and it is desired that all their values are completely resolved, then it may be necessary to perform substitution a number of times before the result is finalized. This function performs substitution sweeps for a set of symbolic variables until all explicit relationships between the symbols in the map have been resolved. Whether each entry returns a symbolic or real value depends on the nature of the values stored in the substitution map. If the values associated with a key are also symbolic then the returned result may still be symbolic in nature. The terminal result of using the input substitution map, symbol_values, is then guaranteed to be rendered by a single substitution of the returned dependency-resolved map.

    Example: If map["a"] -> 1 and map["b"] -> "a" + 2, then the function $f(a,b(a)) = a+b$ will be evaluated and the result $f\vert_{a=1,b=a+2} = 3+a$ is determined upon the completion of the first sweep. A second sweep is therefore necessary to resolve the final symbol, and the returned value is ultimately $f = [3+a]_{a=1} = 4$. By resolving the explicit relationships between all symbols in the map, we determine that map["a"] -> 1 and map["b"] -> 1 + 2 = 3 and thus, using only one substitution, that $f = a+b = 1 + 3 = 4$.

    If the symbols stored in the map are explicitly dependent on one another, then the returned result depends on the order in which the map is traversed. It is recommended to first resolve all interdependencies in the map using the resolve_explicit_dependencies() function.

    Examples:

    1. If map["a"] == 1 and map["b"] == "a" + 2, then the function $f(a,b(a)) := a+b$ will be evaluated and the result $f\vert_{a=1,b=a+2} = 3+a$ is returned. This return is because the symbol "a" is substituted throughout the function first, and only then is the symbol "b(a)" substituted, by which time its explicit dependency on "a" cannot be resolved.

    2. If map["a"] == "b" + 2 and map["b"] == 1, then the function $f(a(b),b) := a+b$ will be evaluated and the result $f\vert_{a=b+2, b} = [b+2+b]_{b=1} = 4$ is returned. This is because the explicitly dependent symbol "a(b)" is substituted first, followed by the symbol "b".
    [in] symbol: An identifier (or name) for the returned symbolic variable.

    Return a vector of Expressions representing a vectorial symbolic variable with the identifier specified by symbol.

    For example, if the symbol is the string "v" then the vectorial symbolic variable that is returned represents the vector $v$. Each component of $v$ is prefixed by the given symbol, and has a suffix that indicates its component index.

    Template Parameters
    dim: The dimension of the returned tensor.

    Returns: The tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{T}}$.

    Returns: The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{S}}$.

    Returns: The tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{T}}$.

    Returns: The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial f}{\partial \mathbf{S}}$.

    Returns: The tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{T}}{\partial x}$.

    Returns: The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{S}}{\partial x}$.

    Returns: The tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{T}}{\partial x}$.

    Returns: The symmetric tensor of symbolic functions or expressions representing the result $\frac{\partial \mathbf{S}}{\partial x}$.

    Returns: The tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{T}_{1}}{\partial \mathbf{T}_{2}}$.

    Returns: The symmetric tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{S}_{1}}{\partial \mathbf{S}_{2}}$.

    Returns: The tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{T}}{\partial \mathbf{S}}$.

    Returns: The tensor of symbolic functions or variables representing the result $\frac{\partial \mathbf{S}}{\partial \mathbf{T}}$.
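    These return values belong to the tensor differentiation functions (presumably the Differentiation::SD::differentiate() overloads). Their use might look like this (a minimal sketch; choosing $f$ as the trace of $\mathbf{T}$ is only illustrative):

        using namespace dealii;
        using namespace dealii::Differentiation;

        // A rank-2 tensor of independent symbolic variables named "T".
        const Tensor<2, dim, SD::Expression> T =
          SD::make_tensor_of_symbols<2, dim>("T");

        // A scalar function of T, here f = trace(T).
        const SD::Expression f = trace(T);

        // df/dT, a rank-2 tensor of expressions (the identity tensor here).
        const Tensor<2, dim, SD::Expression> df_dT = SD::differentiate(f, T);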
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html 2024-11-15 06:44:26.639646656 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFRenumbering.html 2024-11-15 06:44:26.639646656 +0000 @@ -241,13 +241,13 @@

    Using the constraint information usually leads to reductions in bandwidth of 10 or 20 per cent, but may for some very unstructured grids also lead to an increase. You have to weigh the decrease in your case with the time spent to use the constraint information, which usually is several times longer than the ‘pure’ renumbering algorithm.

    In almost all cases, the renumbering scheme finds a corner to start with. Since there is more than one corner in most grids, and since even an interior degree of freedom may be a better starting point, letting the user give the starting point may be a viable way if you have a simple scheme to derive a suitable point (e.g. by successively taking the third child of the cell at the top left of the coarsest level, taking its third vertex and the dof index thereof, if you want the top left corner vertex). If you do not know beforehand what your grid will look like (e.g. when using adaptive algorithms), searching for a best starting point may be difficult, however, and in many cases will not justify the effort.

    Component-wise and block-wise numberings

    For finite elements composed of several base elements using the FESystem class, or for elements which provide several components themselves, it may be of interest to sort the DoF indices by component. This will then bring out the block matrix structure, since otherwise the degrees of freedom are numbered cell-wise without taking into account that they may belong to different components. For example, one may want to sort degrees of freedom for a Stokes discretization so that we first get all velocities and then all the pressures so that the resulting matrix naturally decomposes into a $2\times 2$ system.

    This kind of numbering may be obtained by calling the component_wise() function of this class. Since it does not touch the order of indices within each component, it may be worthwhile to first renumber using the Cuthill-McKee or a similar algorithm and afterwards renumbering component-wise. This will bring out the matrix structure and additionally have a good numbering within each block.

    The component_wise() function allows not only to honor enumeration based on vector components, but also allows to group together vector components into "blocks" using a defaulted argument to the various DoFRenumbering::component_wise() functions (see GlossComponent vs GlossBlock for a description of the difference). The blocks designated through this argument may, but do not have to be, equal to the blocks that the finite element reports. For example, a typical Stokes element would be

    FESystem<dim> stokes_fe (FE_Q<dim>(2), dim,  // dim velocities
                             FE_Q<dim>(1), 1);   // one pressure

    This element has dim+1 vector components and equally many blocks. However, one may want to consider the velocities as one logical block so that all velocity degrees of freedom are enumerated the same way, independent of whether they are $x$- or $y$-velocities. This is done, for example, in step-20 and step-22 as well as several other tutorial programs.
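    In code, grouping the velocities into one block looks like this (a short sketch following step-22, assuming a DoFHandler dof_handler that uses the stokes_fe element above):

        // Map all dim velocity components to block 0, the pressure to block 1.
        std::vector<unsigned int> block_component(dim + 1, 0);
        block_component[dim] = 1;
        DoFRenumbering::component_wise(dof_handler, block_component);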

    On the other hand, if you really want to use block structure reported by the finite element itself (a case that is often the case if you have finite elements that have multiple vector components, e.g. the FE_RaviartThomas or FE_Nedelec elements) then you can use the DoFRenumbering::block_wise instead of the DoFRenumbering::component_wise functions.

    Cell-wise numbering

    Given an ordered vector of cells, the function cell_wise() sorts the degrees of freedom such that degrees on earlier cells of this vector will occur before degrees on later cells.


    The MatrixFree class provides optimized algorithms for interleaving operations on vectors before and after the access of the vector data in the respective loops. The algorithm matrix_free_data_locality() makes sure that all unknowns with a short distance between the first and last access are grouped together, in order to increase the spatial data locality.

    A comparison of reordering strategies

    As a benchmark of comparison, let us consider what the sparsity patterns produced by the various algorithms look like when using the $Q_2^d\times Q_1$ element combination typically employed in the discretization of Stokes equations, when used on the mesh obtained in step-22 after one adaptive mesh refinement in 3d. The space dimension together with the coupled finite element leads to a rather dense system matrix with, on average, around 180 nonzero entries per row. After applying each of the reordering strategies shown below, the degrees of freedom are also sorted using DoFRenumbering::component_wise into velocity and pressure groups; this produces the $2\times 2$ block structure seen below with the large velocity-velocity block at top left, small pressure-pressure block at bottom right, and coupling blocks at top right and bottom left.

    The goal of reordering strategies is to improve the preconditioner. In step-22 we use a SparseILU to precondition the velocity-velocity block at the top left. The quality of the preconditioner can then be measured by the number of CG iterations required to solve a linear system with this block. For some of the reordering strategies below we record this number for adaptive refinement cycle 3, with 93176 degrees of freedom; because we solve several linear systems with the same matrix in the Schur complement, the average number of iterations is reported. The lower the number the better the preconditioner and consequently the better the renumbering of degrees of freedom is suited for this task. We also state the run-time of the program, in part determined by the number of iterations needed, for the first 4 cycles on one of our machines. Note that the reported times correspond to the run time of the entire program, not just the affected solver; if a program runs twice as fast with one particular ordering as with another one, then this means that the actual solver is actually several times faster.

    Sort the degrees of freedom by vector component. The numbering within each component is not touched, so a degree of freedom with index $i$, belonging to some component, and another degree of freedom with index $j$ belonging to the same component will be assigned new indices $n(i)$ and $n(j)$ with $n(i)<n(j)$ if $i<j$ and $n(i)>n(j)$ if $i>j$.

    You can specify that the components are ordered in a different way than suggested by the FESystem object you use. To this end, set up the vector target_component such that the entry at index i denotes the number of the target component for dofs with component i in the FESystem. Naming the same target component more than once is possible and results in a blocking of several components into one. This is discussed in step-22. If you omit this argument, the same order as given by the finite element is used.

    If one of the base finite elements from which the global finite element under consideration here is constructed is a non-primitive one, i.e. its shape functions have more than one non-zero component, then it is not possible to associate these degrees of freedom with a single vector component. In this case, they are associated with the first vector component to which they belong.

    For finite elements with only one component, or a single non-primitive base element, this function is the identity operation.

    Sort the degrees of freedom by vector block. The numbering within each block is not touched, so a degree of freedom with index $i$, belonging to some block, and another degree of freedom with index $j$ belonging to the same block will be assigned new indices $n(i)$ and $n(j)$ with $n(i)<n(j)$ if $i<j$ and $n(i)>n(j)$ if $i>j$.

    Note
    This function only succeeds if each of the elements in the hp::FECollection attached to the DoFHandler argument has exactly the same number of blocks (see the glossary for more information). Note that this is not always given: while the hp::FECollection class ensures that all of its elements have the same number of vector components, they need not have the same number of blocks. At the same time, this function here needs to match individual blocks across elements and therefore requires that elements have the same number of blocks and that subsequent blocks in one element have the same meaning as in another element.

    Definition at line 997 of file dof_renumbering.cc.

  • For meshes based on parallel::distributed::Triangulation, the locally owned cells of each MPI process are contiguous in Z order. That means that numbering degrees of freedom by visiting cells in Z order yields locally owned DoF indices that consist of contiguous ranges for each process. This is also true for the default ordering of DoFs on such triangulations, but the default ordering creates an enumeration that also depends on how many processors participate in the mesh, whereas the one generated by this function enumerates the degrees of freedom on a particular cell with indices that will be the same regardless of how many processes the mesh is split up between.
  • For meshes based on parallel::shared::Triangulation, the situation is more complex. Here, the set of locally owned cells is determined by a partitioning algorithm (selected by passing an object of type parallel::shared::Triangulation::Settings to the constructor of the triangulation), and in general these partitioning algorithms may assign cells to subdomains based on decisions that may have nothing to do with the Z order. (Though it is possible to select these flags in a way so that partitioning uses the Z order.) As a consequence, the cells of one subdomain are not contiguous in Z order, and if one renumbered degrees of freedom based on the Z order of cells, one would generally end up with DoF indices that on each processor do not form a contiguous range. This is often inconvenient (for example, because PETSc cannot store vectors and matrices for which the locally owned set of indices is not contiguous), and consequently this function uses the following algorithm for parallel::shared::Triangulation objects:

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html 2024-11-15 06:44:26.711647299 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceDoFTools.html 2024-11-15 06:44:26.711647299 +0000 @@ -328,7 +328,7 @@

    Setting up sparsity patterns for boundary matrices

    In some cases, one wants to only work with DoFs that sit on the boundary. One application is, for example, if rather than interpolating non-homogeneous boundary values, one would like to project them. For this, we need two things: a way to identify nodes that are located on (parts of) the boundary, and a way to build matrices out of only degrees of freedom that are on the boundary (i.e. much smaller matrices, in which we do not even build the large zero block that stems from the fact that most degrees of freedom have no support on the boundary of the domain). The first of these tasks is done by the map_dof_to_boundary_indices() function (described above).

    The second part requires us first to build a sparsity pattern for the couplings between boundary nodes, and then to actually build the components of this matrix. While actually computing the entries of these small boundary matrices is discussed in the MatrixCreator namespace, the creation of the sparsity pattern is done by the create_boundary_sparsity_pattern() function. For its work, it needs to have a numbering of all those degrees of freedom that are on those parts of the boundary that we are interested in. You can get this from the map_dof_to_boundary_indices() function. It then builds the sparsity pattern corresponding to integrals like $\int_\Gamma \varphi_{b2d(i)} \varphi_{b2d(j)} dx$, where $i$ and $j$ are indices into the matrix, and $b2d(i)$ is the global DoF number of a degree of freedom sitting on a boundary (i.e., $b2d$ is the inverse of the mapping returned by the map_dof_to_boundary_indices() function).
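    Put together, this might look as follows (a sketch assuming a DoFHandler dof_handler; note that the function referred to above as create_boundary_sparsity_pattern() corresponds to DoFTools::make_boundary_sparsity_pattern() in current releases):

        // Number the DoFs that sit on the boundary.
        std::vector<types::global_dof_index> dof_to_boundary(
          dof_handler.n_dofs(), numbers::invalid_dof_index);
        DoFTools::map_dof_to_boundary_indices(dof_handler, dof_to_boundary);

        // Build the (much smaller) sparsity pattern coupling only boundary DoFs.
        DynamicSparsityPattern dsp(dof_handler.n_boundary_dofs(),
                                   dof_handler.n_boundary_dofs());
        DoFTools::make_boundary_sparsity_pattern(dof_handler, dof_to_boundary, dsp);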

    DoF coupling between surface triangulations and bulk triangulations

    When working with Triangulation and DoFHandler objects of different co-dimension, such as a Triangulation<2,3>, describing (part of) the boundary of a Triangulation<3>, and their corresponding DoFHandler objects, one often needs to build a one-to-one matching between the degrees of freedom that live on the surface Triangulation and those that live on the boundary of the bulk Triangulation. The GridGenerator::extract_boundary_mesh() function returns a mapping of surface cell iterators to face iterators, that can be used by the function map_boundary_to_bulk_dof_iterators() to construct a map between cell iterators of the surface DoFHandler, and the corresponding pair of cell iterator and face index of the bulk DoFHandler. Such a map can be used to initialize FEValues and FEFaceValues for the corresponding DoFHandler objects. Notice that one must still ensure that the ordering of the quadrature points coincides in the two objects, in order to build a coupling matrix between the two systems.

    Enumeration Type Documentation

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html 2024-11-15 06:44:26.731647478 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFESeries.html 2024-11-15 06:44:26.731647478 +0000 @@ -178,7 +178,7 @@
    Linear regression least-square fit of $y = k \, x + b$. The size of the input vectors should be equal and more than 1. The returned pair will contain $k$ (first) and $b$ (second).
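    For instance (a minimal sketch with made-up sample data):

        // Fit y = k*x + b to four hypothetical sample points.
        const std::vector<double> x = {1.0, 2.0, 3.0, 4.0};
        const std::vector<double> y = {2.1, 3.9, 6.2, 7.8};

        const std::pair<double, double> fit = FESeries::linear_regression(x, y);
        const double k = fit.first;   // slope
        const double b = fit.second;  // intercept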

    Definition at line 29 of file fe_series.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html 2024-11-15 06:44:26.771647835 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools.html 2024-11-15 06:44:26.771647835 +0000 @@ -206,7 +206,7 @@ void extrapolate (const DoFHandler< dim, spacedim > &dof1, const InVector &z1, const DoFHandler< dim, spacedim > &dof2, const AffineConstraints< typename OutVector::value_type > &constraints, OutVector &z2)

    Detailed Description

    This namespace offers interpolations and extrapolations of discrete functions of one FiniteElement fe1 to another FiniteElement fe2.

    It also provides the local interpolation matrices that interpolate on each cell. Furthermore it provides the difference matrix $id-I_h$ that is needed for evaluating $(id-I_h)z$ for e.g. the dual solution $z$.

    For more information about the spacedim template parameter check the documentation of FiniteElement or the one of Triangulation.

    Function Documentation

    FullMatrix< number > & difference_matrix

    Compute the identity matrix minus the back interpolation matrix. The difference_matrix will be of size (fe1.n_dofs_per_cell(), fe1.n_dofs_per_cell()) after this function. Previous content of the argument will be overwritten.

    This function computes the matrix that transforms a fe1 function $z$ to $z-I_hz$ where $I_h$ denotes the interpolation operator from the fe1 space to the fe2 space. This matrix hence is useful to evaluate error-representations where $z$ denotes the dual solution.

    FullMatrix< number > & matrix

    Compute the local $L^2$-projection matrix from fe1 to fe2.
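    For discontinuous spaces this matrix can be obtained directly; a sketch (assuming FETools::get_projection_matrix() is the intended interface for this computation):

        FE_DGQ<dim> fe1(1);
        FE_DGQ<dim> fe2(2);

        FullMatrix<double> matrix(fe2.n_dofs_per_cell(), fe1.n_dofs_per_cell());
        FETools::get_projection_matrix(fe1, fe2, matrix);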


    This is a rather specialized function used during the construction of finite element objects. It is used to build the basis of shape functions for an element, given a set of polynomials and interpolation points. The function is only implemented for finite elements with exactly dim vector components. In particular, this applies to classes derived from the FE_PolyTensor class.

    Specifically, the purpose of this function is as follows: FE_PolyTensor receives, from its derived classes, an argument that describes a polynomial space. This space may be parameterized in terms of monomials, or in some other way, but is in general not in the form that we use for finite elements where we typically want to use a basis that is derived from some kind of node functional (e.g., the interpolation at specific points). Concretely, assume that the basis used by the polynomial space is $\{\tilde\varphi_j(\mathbf x)\}_{j=1}^N$, and that the node functionals of the finite element are $\{\Psi_i\}_{i=1}^N$. We then want to compute a basis $\{\varphi_j(\mathbf x)\}_{j=1}^N$ for the finite element space so that $\Psi_i[\varphi_j] = \delta_{ij}$. To do this, we can set $\varphi_j(\mathbf x) = \sum_{k=1}^N c_{jk} \tilde\varphi_k(\mathbf x)$ where we need to determine the expansion coefficients $c_{jk}$. We do this by applying $\Psi_i$ to both sides of the equation, to obtain

    \begin{align*}
      \Psi_i [\varphi_j] = \sum_{k=1}^N c_{jk} \Psi_i[\tilde\varphi_k],
    \end{align*}

    and we know that the left hand side equals $\delta_{ij}$. If you think of this as a system of $N\times N$ equations for the elements of a matrix on the left and on the right, then this can be written as

    \begin{align*}
      I = C X^T
    \end{align*}

    where $C$ is the matrix of coefficients $c_{jk}$ and $X_{ik} = \Psi_i[\tilde\varphi_k]$. Consequently, in order to compute the expansion coefficients $C=X^{-T}$, we need to apply the node functionals to all functions of the "raw" basis of the polynomial space.

    Until the finite element receives this matrix $X$ back, it describes its shape functions (e.g., in FiniteElement::shape_value()) in the form $\tilde\varphi_j$. After it calls this function, it has the expansion coefficients and can describe its shape functions as $\varphi_j$.

    This function therefore computes this matrix $X$, for the following specific circumstances:

    OutVector & z1_difference

    Compute $(Id-I_h)z_1$ for a given dof1-function $z_1$, where $I_h$ is the interpolation from fe1 to fe2. The result $(Id-I_h)z_1$ is written into z1_difference.

    Note that this function does not work for continuous elements at hanging nodes. For that case, use the interpolation_difference function below, which takes an additional AffineConstraints object.

    OutVector & z1_difference

    Compute $(Id-I_h)z_1$ for a given dof1-function $z_1$, where $I_h$ is the interpolation from fe1 to fe2. The result $(Id-I_h)z_1$ is written into z1_difference. constraints1 and constraints2 are the hanging node constraints corresponding to dof1 and dof2, respectively. These objects are particularly important when continuous elements on grids with hanging nodes (locally refined grids) are involved.

    For parallel computations, supply z1 with ghost elements and z1_difference without ghost elements.

    OutVector & u2

    $L^2$ projection for discontinuous elements. Operates in the same direction as interpolate.

    The global projection can be computed by local matrices if the finite element spaces are discontinuous. With continuous elements, this is impossible, since a global mass matrix must be inverted.

  • It then performs a loop over all non-active cells of dof2. If such a non-active cell has at least one active child, then we call the children of this cell a "patch". We then interpolate from the children of this patch to the patch, using the finite element space associated with dof2 and immediately interpolate back to the children. In essence, this information throws away all information in the solution vector that lives on a scale smaller than the patch cell.
  • Since we traverse non-active cells from the coarsest to the finest levels, we may find patches that correspond to child cells of previously treated patches if the mesh had been refined adaptively (this cannot happen if the mesh has been refined globally because there the children of a patch are all active). We also perform the operation described above on these patches, but it is easy to see that on patches that are children of previously treated patches, the operation is now the identity operation (since it interpolates from the children of the current patch a function that had previously been interpolated to these children from an even coarser patch). Consequently, this does not alter the solution vector any more.
    The name of the function originates from the fact that it can be used to construct a representation of a function of higher polynomial degree on a once coarser mesh. For example, if you imagine that you start with a $Q_1$ function on a globally refined mesh, and that dof2 is associated with a $Q_2$ element, then this function computes the equivalent of the operator $I_{2h}^{(2)}$ interpolating the original piecewise linear function onto a quadratic function on a once coarser mesh with mesh size $2h$ (but representing this function on the original mesh with size $h$). If the exact solution is sufficiently smooth, then $u^\ast=I_{2h}^{(2)}u_h$ is typically a better approximation to the exact solution $u$ of the PDE than $u_h$ is. In other words, this function provides a postprocessing step that improves the solution in a similar way one often obtains by extrapolating a sequence of solutions, explaining the origin of the function's name.
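    A call might look like this (a sketch, assuming two DoFHandler objects dof_handler_q1 and dof_handler_q2 on the same mesh, a $Q_1$ solution solution_q1, and hanging-node constraints constraints_q2 for the $Q_2$ space):

        Vector<double> extrapolated(dof_handler_q2.n_dofs());
        FETools::extrapolate(dof_handler_q1,
                             solution_q1,
                             dof_handler_q2,
                             constraints_q2,
                             extrapolated);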

    Note
    The resulting field does not satisfy continuity requirements of the given finite elements if the algorithm outlined above is used. When you use continuous elements on grids with hanging nodes, please use the extrapolate function with an additional AffineConstraints argument, see below.
    Since this function operates on patches of cells, it requires that the underlying grid is refined at least once for every coarse grid cell. If this is not the case, an exception will be raised.
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools_1_1Compositing.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools_1_1Compositing.html 2024-11-15 06:44:26.795648049 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFETools_1_1Compositing.html 2024-11-15 06:44:26.795648049 +0000 @@ -140,13 +140,13 @@
    1. Tensor product construction (do_tensor_product=true): The tensor product construction, in the simplest case, builds a vector-valued element from scalar elements (see this documentation topic and this glossary entry for more information). To give an example, consider creating a vector-valued element with two vector components, where the first should have linear shape functions and the second quadratic shape functions. In 1d, the shape functions (on the reference cell) of the base elements are then

      \begin{align*}
        Q_1 &= \{ 1-x, x \},
        \\  Q_2 &= \{ 2(\frac 12 - x)(1-x), 2(x - \frac 12)x, 4x(1-x) \},
      \end{align*}

      where shape functions are ordered in the usual way (first on the first vertex, then on the second vertex, then in the interior of the cell). The tensor product construction will create an element with the following shape functions:

      \begin{align*}
        Q_1 \times Q_2 &=
        \left\{
          \begin{pmatrix} 1-x \\ 0 \end{pmatrix},
          \begin{pmatrix} 0 \\ 2(\frac 12 - x)(1-x) \end{pmatrix},
          \begin{pmatrix} x \\ 0 \end{pmatrix},
          \begin{pmatrix} 0 \\ 2(x - \frac 12)x \end{pmatrix},
          \begin{pmatrix} 0 \\ 4x(1-x) \end{pmatrix}
        \right\}.
      \end{align*}

      The list here is again in standard order.

      Of course, the procedure also works if the base elements are already vector valued themselves: in that case, the composed element simply has as many vector components as the base elements taken together.

    2. Combining shape functions (do_tensor_product=false): In contrast to the previous strategy, combining shape functions simply takes all of the shape functions together. In the case above, this would yield the following element:

      \begin{align*}
        Q_1 + Q_2 &= \{ 1-x, 2(\frac 12 - x)(1-x),
                        x, 2(x - \frac 12)x, 4x(1-x) \}.
      \end{align*}

      In other words, if the base elements are scalar, the resulting element will also be. In general, the base elements all will have to have the same number of vector components.

      The element constructed above of course no longer has a linearly independent set of shape functions. As a consequence, any matrix one creates by treating all shape functions of the composed element in the same way will be singular. In practice, this strategy is therefore typically used in situations where one explicitly makes sure that certain shape functions are treated differently (e.g., by multiplying them with weight functions), or in cases where the shape functions one combines are not linearly dependent.
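    For reference, the tensor product strategy is the one employed by the FESystem constructor, so the element from the first example could be built like this (a minimal sketch):

        // A vector-valued element with one linear and one quadratic component,
        // combined by the tensor product construction.
        FESystem<dim> fe(FE_Q<dim>(1), 1,   // first component: Q1
                         FE_Q<dim>(2), 1);  // second component: Q2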

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFiniteElementDomination.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFiniteElementDomination.html 2024-11-15 06:44:26.811648192 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFiniteElementDomination.html 2024-11-15 06:44:26.811648192 +0000 @@ -139,10 +139,10 @@
      An enum that describes the outcome of comparing two elements for mutual domination. If one element dominates another, then the restriction of the space described by the dominated element to a face of the cell is strictly larger than that of the dominating element. For example, in 2-d Q(2) elements dominate Q(4) elements, because the traces of Q(4) elements are quartic polynomials, which form a space strictly larger than that of the quadratic polynomials (the restriction of the Q(2) element). Similar reasonings apply for vertices and cells as well. In general, Q(k) dominates Q(k') if $k\le k'$.

      This enum is used in the FiniteElement::compare_for_domination() function that is used in the context of hp-finite element methods when determining what to do at faces where two different finite elements meet (see the hp-paper for a more detailed description of the following). In that case, the degrees of freedom of one side need to be constrained to those on the other side. The determination which side is which is based on the outcome of a comparison for mutual domination: the dominated side is constrained to the dominating one.

Note that there are situations where neither side dominates. The hp-paper lists two cases, with the simpler one being that a $Q_2\times Q_1$ vector-valued element (i.e., a FESystem(FE_Q(2),1,FE_Q(1),1)) meets a $Q_1\times Q_2$ element: here, for each of the two vector components, we can define a domination relationship, but it is different for the two components.

It is clear that the concept of domination doesn't matter for discontinuous elements. However, discontinuous elements may be part of vector-valued elements and may therefore be compared against each other for domination. They should return either_element_can_dominate in that case. Likewise, when comparing two identical finite elements, they should return this code; the reason is that we cannot decide which element will dominate at the time we look at the first component of, for example, two $Q_2\times Q_1$ and $Q_2\times Q_2$ elements, and have to keep our options open until we get to the second base element.

      Finally, the code no_requirements exists for cases where elements impose no continuity requirements. The case is primarily meant for FE_Nothing which is an element that has no degrees of freedom in a subdomain. It could also be used by discontinuous elements, for example.

      More details on domination can be found in the hp-paper.
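A minimal sketch (assuming deal.II's FE_Q and the enum documented here) of querying the domination relationship between two Lagrange elements:

#include <deal.II/fe/fe_q.h>

#include <iostream>

int main()
{
  const dealii::FE_Q<2> fe_q2(2);
  const dealii::FE_Q<2> fe_q4(4);

  // Q(2) should dominate Q(4) since 2 <= 4, as explained above.
  const auto outcome = fe_q2.compare_for_domination(fe_q4);

  if (outcome == dealii::FiniteElementDomination::this_element_dominates)
    std::cout << "Q(2) dominates: constrain the Q(4) DoFs to it.\n";
  else if (outcome == dealii::FiniteElementDomination::other_element_dominates)
    std::cout << "Q(4) dominates.\n";
  else
    std::cout << "No unique domination relationship.\n";
}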

/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html 2024-11-15 06:44:26.827648335 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceFunctionTools.html 2024-11-15 06:44:26.827648335 +0000 @@ -143,12 +143,12 @@
const unsigned int component = 0

Estimate bounds on the value and bounds on each gradient component of a Function, $f$, over a BoundingBox, by approximating it by a 2nd order Taylor polynomial starting from the box center.

      Each lower and upper bound is returned as a std::pair<double, double>, such that the first entry is the lower bound, $L$, and the second is the upper bound, $U$, i.e. $f(x) \in [L, U]$.

      The function value, gradient, and Hessian are computed at the box center. The bounds on the value of the function are then estimated as

$f(x) \in [f(x_c) - F, f(x_c) + F]$, where $F = \sum_i |\partial_i f(x_c)| h_i + 1/2 \sum_i \sum_j |\partial_i \partial_j f(x_c)| h_i h_j$.

Here, $h_i$ is half the side length of the box in the $i$th coordinate direction, which is the distance we extrapolate. The bounds on the gradient components are estimated similarly as

      $\partial_i f \in [\partial_i f(x_c) - G_i, \partial_i f(x_c) + G_i]$, where $G_i = \sum_j |\partial_i \partial_j f(x_c)| h_j$.

      If the function has more than 1 component the component parameter can be used to specify which function component the bounds should be computed for.
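A minimal sketch (assuming FunctionTools::taylor_estimate_function_bounds, the function documented here, together with a standard deal.II test function) of estimating bounds over a small box:

#include <deal.II/base/bounding_box.h>
#include <deal.II/base/function_lib.h>
#include <deal.II/base/function_tools.h>

#include <array>
#include <iostream>
#include <utility>

int main()
{
  constexpr int dim = 2;

  // f(x,y) = cos(pi/2 x) cos(pi/2 y): smooth, so the Taylor-based bounds
  // are tight on a small box.
  const dealii::Functions::CosineFunction<dim> f;

  const dealii::BoundingBox<dim> box(
    std::make_pair(dealii::Point<dim>(0., 0.), dealii::Point<dim>(0.1, 0.1)));

  std::pair<double, double>                  value_bounds;
  std::array<std::pair<double, double>, dim> gradient_bounds;
  dealii::FunctionTools::taylor_estimate_function_bounds<dim>(
    f, box, value_bounds, gradient_bounds);

  std::cout << "f in [" << value_bounds.first << ", " << value_bounds.second
            << "]\n";
}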

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGraphColoring.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGraphColoring.html 2024-11-15 06:44:26.839648442 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGraphColoring.html 2024-11-15 06:44:26.839648442 +0000 @@ -148,9 +148,9 @@

      Create a partitioning of the given range of iterators so that iterators that point to conflicting objects will be placed into different partitions, where the question whether two objects conflict is determined by a user-provided function.

      This function can also be considered as a graph coloring: each object pointed to by an iterator is considered to be a node and there is an edge between each two nodes that conflict. The graph coloring algorithm then assigns a color to each node in such a way that two nodes connected by an edge do not have the same color.

      A typical use case for this function is in assembling a matrix in parallel. There, one would like to assemble local contributions on different cells at the same time (an operation that is purely local and so requires no synchronization) but then we need to add these local contributions to the global matrix. In general, the contributions from different cells may be to the same matrix entries if the cells share degrees of freedom and, consequently, can not happen at the same time unless we want to risk a race condition (see http://en.wikipedia.org/wiki/Race_condition). Thus, we call these two cells in conflict, and we can only allow operations in parallel from cells that do not conflict. In other words, two cells are in conflict if the set of matrix entries (for example characterized by the rows) have a nonempty intersection.

In this generality, computing the graph of conflicts would require calling a function that determines whether two iterators (or the two objects they represent) conflict, and calling it for every pair of iterators, i.e., $\frac 12 N (N-1)$ times. This is too expensive in general. A better approach is to require a user-defined function that returns for every iterator it is called for a set of indicators of some kind that characterize a conflict; two iterators are in conflict if their conflict indicator sets have a nonempty intersection. In the example of assembling a matrix, the conflict indicator set would contain the indices of all degrees of freedom on the cell pointed to (in the case of continuous Galerkin methods) or the union of indices of degrees of freedom on the current cell and all cells adjacent to the faces of the current cell (in the case of discontinuous Galerkin methods, because there one computes face integrals coupling the degrees of freedom connected by a common face – see step-12).

      Note
      The conflict set returned by the user defined function passed as third argument needs to accurately describe all degrees of freedom for which anything is written into the matrix or right hand side. In other words, if the writing happens through a function like AffineConstraints::copy_local_to_global(), then the set of conflict indices must actually contain not only the degrees of freedom on the current cell, but also those they are linked to by constraints such as hanging nodes.
In other situations, the conflict indicator sets may represent something different altogether – it is up to the caller of this function to describe what it means for two iterators to conflict. Given this, computing conflict graph edges can be done significantly more cheaply than with ${\cal O}(N^2)$ operations.

      In any case, the result of the function will be so that iterators whose conflict indicator sets have overlap will not be assigned to the same color.

      Note
      The algorithm used in this function is described in a paper by Turcksin, Kronbichler and Bangerth, see workstream_paper.
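A minimal sketch (a hypothetical helper, assuming an already set-up DoFHandler) of the usage pattern described above for parallel assembly:

#include <deal.II/base/graph_coloring.h>
#include <deal.II/dofs/dof_handler.h>

#include <vector>

template <int dim>
std::vector<std::vector<typename dealii::DoFHandler<dim>::active_cell_iterator>>
color_cells(const dealii::DoFHandler<dim> &dof_handler)
{
  using Iterator = typename dealii::DoFHandler<dim>::active_cell_iterator;

  // Conflict indicator set of a cell: the global indices of its DoFs. Two
  // cells conflict if these sets intersect, i.e., if they share DoFs.
  auto get_conflict_indices =
    [](const Iterator &cell) -> std::vector<dealii::types::global_dof_index> {
    std::vector<dealii::types::global_dof_index> dof_indices(
      cell->get_fe().n_dofs_per_cell());
    cell->get_dof_indices(dof_indices);
    return dof_indices;
  };

  // Cells within one returned partition (color) can be assembled in
  // parallel without write conflicts in the global matrix.
  return dealii::GraphColoring::make_graph_coloring(dof_handler.begin_active(),
                                                    dof_handler.end(),
                                                    get_conflict_indices);
}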
      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html 2024-11-15 06:44:26.919649157 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridGenerator.html 2024-11-15 06:44:26.919649157 +0000 @@ -306,7 +306,7 @@
const bool colorize = false

Initialize the given triangulation with a hypercube (line in 1d, square in 2d, etc.) consisting of exactly one cell. The hypercube volume is the tensor product interval $[left,right]^{\text{dim}}$ in the present number of dimensions, where the limits are given as arguments. They default to zero and unity, then producing the unit hypercube.

      If the argument colorize is false, then all boundary indicators are set to zero (the default boundary indicator) for 2d and 3d. If it is true, the boundary is colorized as in hyper_rectangle(). In 1d the indicators are always colorized, see hyper_rectangle().
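A minimal sketch of the call just described:

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>

int main()
{
  dealii::Triangulation<2> tria;

  // One-cell unit square; with colorize=true the four boundary faces
  // receive distinct boundary indicators as in hyper_rectangle().
  dealii::GridGenerator::hyper_cube(tria, 0., 1., /*colorize=*/true);
}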

      @@ -750,7 +750,7 @@
const bool colorize = false

Generate a grid consisting of a channel with a cylinder. This is a common benchmark for Navier-Stokes solvers. The geometry consists of a channel of size $[0, 2.2] \times [0, 0.41] \times [0, 0.41]$ (where the $z$ dimension is omitted in 2d) with a cylinder, parallel to the $z$ axis with diameter $0.1$, centered at $(0.2, 0.2, 0)$. The channel has three distinct regions:

        1. If n_shells is greater than zero, then there are that many shells centered around the cylinder,
  2.
The resulting Triangulation uses three manifolds: a PolarManifold (in 2d) or CylindricalManifold (in 3d) with manifold id $0$, a TransfiniteInterpolationManifold with manifold id $1$, and a FlatManifold everywhere else. For more information on this topic see the glossary entry on manifold indicators. The cell faces on the cylinder and surrounding shells have manifold ids of $0$, while the cell volumes adjacent to the shells (or, if they do not exist, the cylinder) have a manifold id of $1$. Put another way: this grid uses TransfiniteInterpolationManifold to smoothly transition from the shells (generated with GridGenerator::concentric_hyper_shells) to the bulk region. All other cell volumes and faces have manifold id numbers::flat_manifold_id and use FlatManifold. All cells with id numbers::flat_manifold_id are rectangular prisms aligned with the coordinate axes.

The picture below shows part of the 2d grid (using all default arguments to this function) after two global refinements. The cells with manifold id $0$ are orange (the polar manifold id), cells with manifold id $1$ are yellow (the transfinite interpolation manifold id), and the ones with manifold id numbers::flat_manifold_id are cyan:

          Parameters
tria : Triangulation to be created. Must be empty upon calling this function.
shell_region_width : Width of the layer of shells around the cylinder. This value should be between $0$ and $0.05$; the default value is $0.03$.
n_shells : Number of shells to use in the shell layer.
skewness : Parameter controlling how close the shells are to the cylinder: see the mathematical definition given in GridGenerator::concentric_hyper_shells.
colorize : If true, then assign different boundary ids to different parts of the boundary. For more information on boundary indicators see this glossary entry. The left boundary (at $x = 0$) is assigned an id of $0$, the right boundary (at $x = 2.2$) is assigned an id of $1$; the boundary of the obstacle in the middle (i.e., the circle in 2d or the cylinder walls in 3d) is assigned an id of $2$, and the channel walls are assigned an id of $3$.
          @@ -1148,12 +1148,12 @@
const unsigned int n_rotate_middle_square

Generate a 2d mesh consisting of five squares arranged in a plus-shape. Depending on the number n_rotate_middle_square passed, the middle square is rotated by n_rotate_middle_square times $\pi/2$. This way one can generate a mesh in which the middle square contains edges that have the opposite tangential and/or opposite normal orientation compared to the neighboring edges of the other squares.

      This mesh is not overly useful from a practical point of view. For debugging purposes it can be used to check for orientation issues for vector- or tensor-valued finite elements.

      Parameters
[out] tria : The input triangulation.
[in] n_rotate_middle_square : number of rotations in [0,4) of right square by $\pi/2$.
      @@ -1190,7 +1190,7 @@
const bool manipulate_left_cube

Generate a 3d mesh consisting of the unit cube joined with a copy shifted by $s = (1,0,0)$. Depending on the flags passed either the right or the left cube (when looking at the positively oriented (x,z)-plane) contains a face that is either not in standard orientation and/or is rotated by either $\pi/2$, $\pi$ or $3/2\pi$.

      This mesh is not overly useful from a practical point of view. For debugging purposes it can be used to check for orientation issues for vector- or tensor-valued finite elements.

@@ -1342,7 +1342,7 @@
const double half_length = 1.

Create a dim dimensional cylinder where the $x$-axis serves as the axis of the cylinder. For the purposes of this function, a cylinder is defined as a (dim - 1) dimensional disk of given radius, extruded along the axis of the cylinder (which is the first coordinate direction). Consequently, in three dimensions, the cylinder extends from x=-half_length to x=+half_length and its projection into the yz-plane is a circle of radius radius. In two dimensions, the cylinder is a rectangle from x=-half_length to x=+half_length and from y=-radius to y=radius.

      The boundaries are colored according to the following scheme: 0 for the hull of the cylinder, 1 for the left hand face and 2 for the right hand face (see the glossary entry on colorization).

      The manifold id for the hull of the cylinder is set to zero, and a CylindricalManifold is attached to it.

      Precondition
      The triangulation passed as argument needs to be empty when calling this function.
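A minimal sketch of creating the 3d cylinder just described:

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>

int main()
{
  dealii::Triangulation<3> tria;

  // Cylinder along the x-axis: radius 1, extending from x=-2 to x=+2.
  // Boundary ids: 0 = hull, 1 = left face, 2 = right face; the hull gets
  // manifold id 0 with a CylindricalManifold attached.
  dealii::GridGenerator::cylinder(tria, /*radius=*/1., /*half_length=*/2.);
}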
      @@ -1377,7 +1377,7 @@
const double half_length = 1.

Create a dim dimensional cylinder where the $x$-axis serves as the axis of the cylinder. For the purposes of this function, a cylinder is defined as a (dim - 1) dimensional disk of given radius, extruded along the axis of the cylinder (which is the first coordinate direction). Consequently, in three dimensions, the cylinder extends from x=-half_length to x=+half_length and its projection into the yz-plane is a circle of radius radius. In two dimensions, the cylinder is a rectangle from x=-half_length to x=+half_length and from y=-radius to y=radius. This function is only implemented for dim==3.

      The boundaries are colored according to the following scheme: 0 for the hull of the cylinder, 1 for the left hand face and 2 for the right hand face (see the glossary entry on colorization).

      The manifold id for the hull of the cylinder is set to zero, and a CylindricalManifold is attached to it.

      @@ -1484,7 +1484,7 @@
tria : An empty triangulation which will hold the pipe junction geometry.
openings : Center point and radius of each of the three openings. The container has to be of size three.
bifurcation : Center point of the bifurcation and hypothetical radius of each truncated cone at the bifurcation.
aspect_ratio : Aspect ratio of cells, specified as radial over z-extension. Default ratio is $\Delta r/\Delta z = 1/2$.
Point       Radius
Openings    $(2,0,0)$    $1$
            $(0,2,0)$    $1$
            $(0,0,2)$    $1$
Bifurcation $(0,0,0)$    $1$
Point       Radius
Openings    $(-2,0,0)$   $1$
            $(0,2,0)$    $1$
            $(2,0,0)$    $1$
Bifurcation $(0,0,0)$    $1$
Point       Radius
Openings    $(-2,0,0)$         $1$
            $(1,\sqrt{3},0)$   $1$
            $(1,-\sqrt{3},0)$  $1$
Bifurcation $(0,0,0)$          $1$

      Definition at line 266 of file grid_generator_pipe_junction.cc.
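A minimal sketch (assuming the openings/bifurcation signature documented here) building the symmetric junction from the first table above, with three openings of radius 1 at distance 2 from the origin:

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>

#include <utility>
#include <vector>

int main()
{
  using dealii::Point;

  dealii::Triangulation<3> tria;

  const std::vector<std::pair<Point<3>, double>> openings = {
    {Point<3>(2, 0, 0), 1.},
    {Point<3>(0, 2, 0), 1.},
    {Point<3>(0, 0, 2), 1.}};
  const std::pair<Point<3>, double> bifurcation(Point<3>(0, 0, 0), 1.);

  // The default aspect ratio of cells is Delta r / Delta z = 1/2.
  dealii::GridGenerator::pipe_junction(tria, openings, bifurcation);
}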

      @@ -1578,7 +1578,7 @@
      Parameters
tria : A Triangulation object which has to be empty.
sizes : A vector of integers of dimension GeometryInfo<dim>::faces_per_cell with the following meaning: the legs of the cross are stacked on the faces of the center cell, in the usual order of deal.II cells, namely first $-x$, then $x$, then $-y$ and so on. The corresponding entries in sizes name the number of cells stacked on this face. All numbers may be zero, thus L- and T-shaped domains are specializations of this domain.
colorize_cells : If colorization is enabled, then the material id of a cell corresponds to the leg it is in. The id of the center cell is zero, and then the legs are numbered starting at one (see the glossary entry on colorization).
      @@ -1765,9 +1765,9 @@
    3. 96 for the rhombic dodecahedron refined once. This choice dates from an older version of deal.II before the Manifold classes were implemented: today this choice is equivalent to the rhombic dodecahedron after performing one global refinement.
    4. Numbers of the kind $192\times 2^m$ with $m\geq 0$ integer. This choice is similar to the 24 and 48 cell cases, but provides additional refinements in azimuthal direction combined with a single layer in radial direction. The base mesh is either the 6 or 12 cell version, depending on whether $m$ in the power is odd or even, respectively.

      The versions with 24, 48, and $2^m 192$ cells are useful if the shell is thin and the radial lengths should be made more similar to the circumferential lengths.

      The 3d grids with 12 and 96 cells are plotted below:

@@ -1920,7 +1920,7 @@
const bool colorize = false

      Produce a domain that is the intersection between a hyper-shell with given inner and outer radius, i.e. the space between two circles in two space dimensions and the region between two spheres in 3d, and the positive quadrant (in 2d) or octant (in 3d). In 2d, this is indeed a quarter of the full annulus, while the function is a misnomer in 3d because there the domain is not a quarter but one eighth of the full shell.

      If the number of initial cells is zero (as is the default), then it is computed adaptively such that the resulting elements have the least aspect ratio in 2d.

If colorize is set to true, the inner, outer, left, and right boundary get indicator 0, 1, 2, and 3 in 2d, respectively. Otherwise all indicators are set to 0. In 3d indicator 2 is at the face $x=0$, 3 at $y=0$, 4 at $z=0$ (see the glossary entry on colorization).

      All manifold ids are set to zero, and a SphericalManifold is attached to the triangulation.

      Precondition
      The triangulation passed as argument needs to be empty when calling this function.
@@ -1968,7 +1968,7 @@
const bool colorize = false

Produce a domain that is the space between two cylinders in 3d, with given length, inner and outer radius and a given number of elements. The cylinder shell is built around the $z$-axis with the two faces located at $z = 0$ and $z = \text{length}$.

      If n_radial_cells is zero (as is the default), then it is computed adaptively such that the resulting elements have the least aspect ratio. The same holds for n_axial_cells.

      If colorize is set to true, a boundary id of 0 is set for the inner cylinder, a boundary id of 1 is set for the outer cylinder, a boundary id of 2 is set for the bottom (z-) boundary and a boundary id of 3 is set for the top (z+) boundary.

      Note
      Although this function is declared as a template, it does not make sense in 1d and 2d. Also keep in mind that this object is rotated and positioned differently than the one created by cylinder().
      @@ -2016,9 +2016,9 @@
Produce the volume or surface mesh of a torus. The axis of the torus is the $y$-axis while the plane of the torus is the $x$-$z$ plane.

      If dim is 3, the mesh will be the volume of the torus, using a mesh equivalent to the circle in the poloidal coordinates with 5 cells on the cross section. This function attaches a TorusManifold to all boundary faces which are marked with a manifold id of 1, a CylindricalManifold to the interior cells and all their faces which are marked with a manifold id of 2 (representing a flat state within the poloidal coordinates), and a TransfiniteInterpolationManifold to the cells between the TorusManifold on the surface and the ToroidalManifold in the center, with cells marked with manifold id 0.

An example for the case if dim is 3 with a cut through the domain at $z=0$, 6 toroidal cells, $R=2$ and $r=0.5$ without any global refinement is given here:
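The referenced picture is not reproduced in this diff; a minimal sketch creating that same torus (volume mesh, $R=2$, $r=0.5$) follows:

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>

int main()
{
  dealii::Triangulation<3> tria;

  // Centerline radius R=2, tube radius r=0.5; the default number of
  // toroidal cells is 6, matching the example mentioned above.
  dealii::GridGenerator::torus(tria, 2.0, 0.5);
}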

/usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html 2024-11-15 06:44:26.943649371 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridRefinement.html 2024-11-15 06:44:26.943649371 +0000 @@ -229,7 +229,7 @@

As an example, with no coarsening, setting top_fraction_of_cells to 1/3 will result in approximately doubling the number of cells in two dimensions. That is because each of these 1/3 of cells will be replaced by its four children, resulting in $4\times \frac 13 N$ cells, whereas the remaining 2/3 of cells remains untouched – thus yielding a total of $4\times \frac 13 N + \frac 23 N = 2N$ cells. The same effect in three dimensions is achieved by refining 1/7th of the cells. These values are therefore frequently used because they ensure that the cost of computations on subsequent meshes become expensive sufficiently quickly that the fraction of time spent on the coarse meshes is not too large. On the other hand, the fractions are small enough that mesh adaptation does not refine too many cells in each step.

    Note
    This function only sets the coarsening and refinement flags. The mesh is not changed until you call Triangulation::execute_coarsening_and_refinement().
    @@ -289,14 +289,14 @@

    This function provides a strategy to mark cells for refinement and coarsening with the goal of controlling the reduction of the error estimate.

    Also known as the bulk criterion or Dörfler marking, this function computes the thresholds for refinement and coarsening such that the criteria of cells getting flagged for refinement make up for a certain fraction of the total error. We explain its operation for refinement, coarsening works analogously.

Let $c_K$ be the criterion of cell $K$. Then the total error estimate is computed by the formula

\[
 E = \sum_{K\in \cal T} c_K.
\]

If $0 < a < 1$ is top_fraction, then we refine the smallest subset $\cal M$ of the Triangulation $\cal T$ such that

\[
 a E \le \sum_{K\in \cal M} c_K
\]

    The algorithm is performed by the greedy algorithm described in refine_and_coarsen_fixed_number().

    Note
The often used formula with squares on the left and right is recovered by actually storing the square of $c_K$ in the vector criteria.
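A minimal sketch (with hypothetical fractions) of Dörfler marking followed by executing the flags:

#include <deal.II/grid/grid_refinement.h>
#include <deal.II/grid/tria.h>
#include <deal.II/lac/vector.h>

template <int dim>
void refine_mesh(dealii::Triangulation<dim> &tria,
                 const dealii::Vector<float> &criteria)
{
  // Refine the smallest cell set accounting for 30% of the total error,
  // coarsen cells accounting for the bottom 3%. Store squared indicators
  // in 'criteria' to recover the usual formula with squares.
  dealii::GridRefinement::refine_and_coarsen_fixed_fraction(
    tria, criteria, /*top_fraction=*/0.3, /*bottom_fraction=*/0.03);

  // The flags only take effect here:
  tria.execute_coarsening_and_refinement();
}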
    @@ -339,32 +339,32 @@
const unsigned int order = 2

This function flags cells of a triangulation for refinement with the aim to reach a grid that is optimal with respect to an objective function that tries to balance reducing the error and increasing the numerical cost when the mesh is refined. Specifically, this function makes the assumption that if you refine a cell $K$ with error indicator $\eta_K$ provided by the second argument to this function, then the error on the children (for all children together) will only be $2^{-\text{order}}\eta_K$ where order is the third argument of this function. This makes the assumption that the error is only a local property on a mesh and can be reduced by local refinement – an assumption that is true for the interpolation operator, but not for the usual Galerkin projection, although it is approximately true for elliptic problems where the Greens function decays quickly and the error here is not too much affected by a too coarse mesh somewhere else.

With this, we can define the objective function this function tries to optimize. Let us assume that the mesh currently has $N_0$ cells. Then, if we refine the $m$ cells with the largest errors, we expect to get (in $d$ space dimensions)

\[
   N(m) = (N_0-m) + 2^d m = N_0 + (2^d-1)m
\]

cells ( $N_0-m$ are not refined, and each of the $m$ cells we refine yields $2^d$ child cells). On the other hand, with refining $m$ cells, and using the assumptions above, we expect that the error will be

\[
   \eta^\text{exp}(m)
   =
   \sum_{K, K\; \text{will not be refined}} \eta_K
   +
   \sum_{K, K\; \text{will be refined}} 2^{-\text{order}}\eta_K
\]

where the first sum extends over $N_0-m$ cells and the second over the $m$ cells that will be refined. Note that $N(m)$ is an increasing function of $m$ whereas $\eta^\text{exp}(m)$ is a decreasing function.

This function then tries to find that number $m$ of cells to mark for refinement for which the objective function

\[
   J(m) = N(m)^{\text{order}/d} \eta^\text{exp}(m)
\]

    is minimal.

    The rationale for this function is two-fold. First, compared to the refine_and_coarsen_fixed_fraction() and refine_and_coarsen_fixed_number() functions, this function has the property that if all refinement indicators are the same (i.e., we have achieved a mesh where the error per cell is equilibrated), then the entire mesh is refined. This is based on the observation that a mesh with equilibrated error indicators is the optimal mesh (i.e., has the least overall error) among all meshes with the same number of cells. (For proofs of this, see R. Becker, M. Braack, R. Rannacher: "Numerical simulation of laminar flames at low Mach number with adaptive finite elements", Combustion Theory and Modelling, Vol. 3, Nr. 3, p. 503-534 1999; and W. Bangerth, R. Rannacher: "Adaptive Finite Element Methods for Differential Equations", Birkhauser, 2003.)

Second, the function uses the observation that ideally, the error behaves like $e \approx c N^{-\alpha}$ with some constant $\alpha$ that depends on the dimension and the finite element degree. It should - given optimal mesh refinement - not depend so much on the regularity of the solution, as it is based on the idea that all singularities can be resolved by refinement. Mesh refinement is then based on the idea that we want to make $c=e N^\alpha$ small. This corresponds to the functional $J(m)$ above.

    Note
    This function was originally implemented by Thomas Richter. It follows a strategy described in [Richter2005]. See in particular Section 4.3, pp. 42-43.

    Definition at line 447 of file grid_refinement.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html 2024-11-15 06:44:27.051650336 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceGridTools.html 2024-11-15 06:44:27.051650336 +0000 @@ -490,7 +490,7 @@
Triangulation< dim, spacedim > & triangulation

    Transform the vertices of the given triangulation by applying the function object provided as first argument to all its vertices.

The transformation given as argument is used to transform each vertex. Its respective type has to offer a function-like syntax, i.e. the predicate is either an object of a type that has an operator(), or it is a pointer to a non-member function, or it is a lambda function object. In either case, argument and return value have to be of type Point<spacedim>. An example – a simple transformation that moves the object two units to the right in the $x_1$ direction – could look as follows:

... // fill triangulation with something
GridTools::transform([](const Point<2> &p) -> Point<2>
                     {
                       // shift each vertex two units to the right
                       Point<2> q = p;
                       q[0] += 2;
                       return q;
                     },
                     triangulation);
    @@ -671,13 +671,13 @@
const bool solve_for_absolute_positions = false

    Transform the given triangulation smoothly to a different domain where, typically, each of the vertices at the boundary of the triangulation is mapped to the corresponding points in the new_points map.

The unknown displacement field $u_d(\mathbf x)$ in direction $d$ is obtained from the minimization problem

\[ \min\, \int \frac{1}{2}
   c(\mathbf x)
   \mathbf \nabla u_d(\mathbf x) \cdot
   \mathbf \nabla u_d(\mathbf x)
   \,\rm d x
\]

    subject to prescribed constraints. The minimizer is obtained by solving the Laplace equation of the dim components of a displacement field that maps the current domain into one described by new_points . Linear finite elements with four Gaussian quadrature points in each direction are used. The difference between the vertex positions specified in new_points and their current value in tria therefore represents the prescribed values of this displacement field at the boundary of the domain, or more precisely at all of those locations for which new_points provides values (which may be at part of the boundary, or even in the interior of the domain). The function then evaluates this displacement field at each unconstrained vertex and uses it to place the mapped vertex where the displacement field locates it. Because the solution of the Laplace equation is smooth, this guarantees a smooth mapping from the old domain to the new one.

    @@ -2149,7 +2149,7 @@

    This function does the same as the previous one, i.e. it partitions a triangulation using a partitioning algorithm into a number of subdomains identified by the cell->subdomain_id() flag.

    The difference to the previous function is the second argument, a sparsity pattern that represents the connectivity pattern between cells.

While the function above builds it directly from the triangulation by considering which cells neighbor each other, this function can take a more refined connectivity graph. The sparsity pattern needs to be of size $N\times N$, where $N$ is the number of active cells in the triangulation. If the sparsity pattern contains an entry at position $(i,j)$, then this means that cells $i$ and $j$ (in the order in which they are traversed by active cell iterators) are to be considered connected; the partitioning algorithm will then try to partition the domain in such a way that (i) the subdomains are of roughly equal size, and (ii) a minimal number of connections are broken.

    This function is mainly useful in cases where connections between cells exist that are not present in the triangulation alone (otherwise the previous function would be the simpler one to use). Such connections may include that certain parts of the boundary of a domain are coupled through symmetric boundary conditions or integrals (e.g. friction contact between the two sides of a crack in the domain), or if a numerical scheme is used that not only connects immediate neighbors but a larger neighborhood of cells (e.g. when solving integral equations).

In addition, this function may be useful in cases where the default sparsity pattern is not entirely sufficient. This can happen because the default is to just consider face neighbors, not neighboring cells that are connected by edges or vertices. While the latter couple when using continuous finite elements, they are typically still closely connected in the neighborship graph, and the partitioning algorithm will not usually cut important connections in this case. However, if there are vertices in the mesh where many cells (many more than the common 4 or 6 in 2d and 3d, respectively) come together, then there will be a significant number of cells that are connected across a vertex, but several degrees removed in the connectivity graph built only using face neighbors. In a case like this, the partitioning algorithm may sometimes make bad decisions and you may want to build your own connectivity graph.

    Note
    If the weight signal has been attached to the triangulation, then this will be used and passed to the partitioner.
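A minimal sketch of the simpler, triangulation-only overload mentioned above (assuming deal.II was configured with a graph partitioner such as METIS):

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/tria.h>

int main()
{
  dealii::Triangulation<2> tria;
  dealii::GridGenerator::hyper_cube(tria);
  tria.refine_global(4);

  // Assign each of the 256 active cells to one of 4 subdomains; query
  // the result per cell via cell->subdomain_id().
  dealii::GridTools::partition_triangulation(4, tria);
}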
    @@ -2572,7 +2572,7 @@
const FullMatrix< double > & matrix = FullMatrix<double>()

    An orthogonal equality test for faces.

    face1 and face2 are considered equal, if a one to one matching between its vertices can be achieved via an orthogonal equality relation. If no such relation exists then the returned std::optional object is empty (i.e., has_value() will return false).

Here, two vertices v_1 and v_2 are considered equal, if $M\cdot v_1 + offset - v_2$ is parallel to the unit vector in direction direction. If the parameter matrix is a reference to a spacedim x spacedim matrix, $M$ is set to matrix, otherwise $M$ is the identity matrix.

    If the matching was successful, the relative orientation of face1 with respect to face2 is returned a std::optional<unsigned char>, in which the stored value is the same orientation bit format used elsewhere in the library. More information on that topic can be found in the glossary article.

    Definition at line 2426 of file grid_tools_dof_handlers.cc.

    @@ -2623,8 +2623,8 @@

    This function tries to match all faces belonging to the first boundary with faces belonging to the second boundary with the help of orthogonal_equality().

    The unsigned char that is returned inside of PeriodicFacePair encodes the relative orientation of the first face with respect to the second face, see the documentation of orthogonal_equality() for further details.

    The direction refers to the space direction in which periodicity is enforced. When matching periodic faces this vector component is ignored.

The offset is a vector tangential to the faces that is added to the location of vertices of the 'first' boundary when attempting to match them to the corresponding vertices of the 'second' boundary. This can be used to implement conditions such as $u(0,y)=u(1,y+1)$.

Optionally, a $dim\times dim$ rotation matrix can be specified that describes how vector valued DoFs of the first face should be modified prior to constraining to the DoFs of the second face. The matrix is used in two places. First, matrix will be supplied to orthogonal_equality() and used for matching faces: Two vertices $v_1$ and $v_2$ match if $\text{matrix}\cdot v_1 + \text{offset} - v_2$ is parallel to the unit vector in direction direction. (For more details see DoFTools::make_periodicity_constraints(), the glossary entry on periodic conditions and step-45). Second, matrix will be stored in the PeriodicFacePair collection matched_pairs for further use.
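A minimal sketch (assuming a mesh whose boundary ids 0 and 1 mark the two faces that are periodic in the x-direction, e.g. from a colorized hyper_cube):

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/tria.h>

#include <vector>

int main()
{
  dealii::Triangulation<2> tria;
  dealii::GridGenerator::hyper_cube(tria, 0., 1., /*colorize=*/true);

  std::vector<dealii::GridTools::PeriodicFacePair<
    dealii::Triangulation<2>::cell_iterator>>
    matched_pairs;

  // Match faces with boundary id 0 to faces with boundary id 1 along
  // coordinate direction 0 (the x-direction).
  dealii::GridTools::collect_periodic_faces(
    tria, /*b_id1=*/0, /*b_id2=*/1, /*direction=*/0, matched_pairs);

  // The pairs can then be handed to, e.g., tria.add_periodicity().
}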

Template Parameters
MeshType : A type that satisfies the requirements of the MeshType concept.
@@ -3154,8 +3154,8 @@

Compute the volume (i.e. the dim-dimensional measure) of the triangulation. We compute the measure using the integral $\sum_K \int_K 1 \; dx$ where $K$ are the cells of the given triangulation. The integral is approximated via quadrature. This version of the function uses a linear mapping to compute the JxW values on each cell.

    If the triangulation is a dim-dimensional one embedded in a higher dimensional space of dimension spacedim, then the value returned is the dim-dimensional measure. For example, for a two-dimensional triangulation in three-dimensional space, the value returned is the area of the surface so described. (This obviously makes sense since the spacedim-dimensional measure of a dim-dimensional triangulation would always be zero if dim < spacedim).

    This function also works for objects of type parallel::distributed::Triangulation, in which case the function is a collective operation.
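A minimal sketch of the linear-mapping version:

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/tria.h>

#include <iostream>

int main()
{
  dealii::Triangulation<2> tria;
  dealii::GridGenerator::hyper_cube(tria, 0., 2.);

  // Area of [0,2]^2, approximated by quadrature: 4.0.
  std::cout << dealii::GridTools::volume(tria) << std::endl;
}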

@@ -3187,8 +3187,8 @@
const Mapping< dim, spacedim > & mapping

Compute the volume (i.e. the dim-dimensional measure) of the triangulation. We compute the measure using the integral $\sum_K \int_K 1 \; dx$ where $K$ are the cells of the given triangulation. The integral is approximated via quadrature for which we use the mapping argument.

    If the triangulation is a dim-dimensional one embedded in a higher dimensional space of dimension spacedim, then the value returned is the dim-dimensional measure. For example, for a two-dimensional triangulation in three-dimensional space, the value returned is the area of the surface so described. (This obviously makes sense since the spacedim-dimensional measure of a dim-dimensional triangulation would always be zero if dim < spacedim.

    This function also works for objects of type parallel::distributed::Triangulation, in which case the function is a collective operation.

    Parameters
    @@ -3326,8 +3326,8 @@
This function computes an affine approximation of the map from the unit coordinates to the real coordinates of the form $p_\text{real} = A p_\text{unit} + b$ by a least squares fit of this affine function to the $2^\text{dim}$ vertices representing a quadrilateral or hexahedral cell in spacedim dimensions. The result is returned as a pair with the matrix A as the first argument and the vector b describing distance of the plane to the origin.

    For any valid mesh cell whose geometry is not degenerate, this operation results in a unique affine mapping, even in cases where the actual transformation by a bi-/trilinear or higher order mapping might be singular. The result is exact in case the transformation from the unit to the real cell is indeed affine, such as in one dimension or for Cartesian and affine (parallelogram) meshes in 2d/3d.

    This approximation is underlying the function TriaAccessor::real_to_unit_cell_affine_approximation() function.

    For exact transformations to the unit cell, use Mapping::transform_real_to_unit_cell().

@@ -3358,7 +3358,7 @@
const Quadrature< dim > & quadrature

Computes an aspect ratio measure for all locally-owned active cells and fills a vector with one entry per cell, given a triangulation and mapping. The size of the vector that is returned equals the number of active cells. The vector contains zero for non locally-owned cells. The aspect ratio of a cell is defined as the ratio of the maximum to minimum singular value of the Jacobian, taking the maximum over all quadrature points of a quadrature rule specified via quadrature. For example, for the special case of rectangular elements in 2d with dimensions $a$ and $b$ ( $a \geq b$), this function returns the usual aspect ratio definition $a/b$. The above definition using singular values is a generalization to arbitrarily deformed elements. This function is intended to be used for $d=2,3$ space dimensions, but it can also be used for $d=1$ returning a value of 1.

    Note
    Inverted elements do not throw an exception. Instead, a value of inf is written into the vector in case of inverted elements.
    Make sure to use enough quadrature points for a precise calculation of the aspect ratio in case of deformed elements.
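A minimal sketch (assuming GridTools::compute_aspect_ratio_of_cells, the function documented here):

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/fe/mapping_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/tria.h>
#include <deal.II/lac/vector.h>

int main()
{
  dealii::Triangulation<2> tria;
  dealii::GridGenerator::hyper_cube(tria);
  tria.refine_global(2);

  const dealii::MappingQ<2> mapping(1);
  const dealii::QGauss<2>   quadrature(2);

  // One entry per active cell; exactly 1.0 for these square cells.
  const dealii::Vector<double> ratios =
    dealii::GridTools::compute_aspect_ratio_of_cells(mapping, tria, quadrature);
}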
@@ -3560,7 +3560,7 @@
const double tol = 1e-12

    Remove vertices that are duplicated, due to the input of a structured grid, for example. If these vertices are not removed, the faces bounded by these vertices become part of the boundary, even if they are in the interior of the mesh.

    This function is called by some GridIn::read_* functions. Only the vertices with indices in considered_vertices are tested for equality. This speeds up the algorithm, which is quite slow for worst-case hypercube geometries: $O(N^{3/2})$ in 2d and $O(N^{5/3})$ in 3d. However, if you wish to consider all vertices, simply pass an empty vector. In that case, the function fills considered_vertices with all vertices.

    Two vertices are considered equal if their difference in each coordinate direction is less than tol. This implies that nothing happens if the tolerance is set to zero.

    Definition at line 348 of file grid_tools_topology.cc.
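
    A usage sketch; the raw-mesh-data signature (vertices, cells, subcelldata, considered_vertices, tol) is assumed from the description, and the surrounding helper is illustrative:

    #include <deal.II/grid/grid_tools.h>
    #include <deal.II/grid/tria.h>

    using namespace dealii;

    // Sketch: merge duplicated vertices in raw mesh data, then build the mesh.
    // An empty `considered` vector means "test all vertices" (and is filled
    // with all vertex indices by the call).
    void merge_vertices_example(std::vector<Point<3>>    &vertices,
                                std::vector<CellData<3>> &cells,
                                SubCellData              &subcelldata)
    {
      std::vector<unsigned int> considered;
      GridTools::delete_duplicated_vertices(vertices, cells, subcelldata,
                                            considered, /*tol=*/1e-12);

      Triangulation<3> tria;
      tria.create_triangulation(vertices, cells, subcelldata);
    }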

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators.html differs (HTML document, ASCII text, with very long lines)

    The namespace L2 contains functions for mass matrices and L2-inner products.

    Notational conventions

    In most cases, the action of a function in this namespace can be described by a single integral. We distinguish between integrals over cells Z and over faces F. If an integral is denoted as

\[
   \int_Z u \otimes v \,dx,
\]

    it will yield the following results, depending on the type of operation

    • If the function returns a number, then this number is the integral of the two given functions u and v.
    We will use regular cursive symbols $u$ for scalars and bold symbols $\mathbf u$ for vectors. Test functions are always v and trial functions are always u. Parameters are Greek and the face normal vectors are $\mathbf n = \mathbf n_1 = -\mathbf n_2$.

    Signature of functions

    Functions in this namespace follow a generic signature. In the simplest case, you have two related functions

    template <int dim>
    void
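
    The declarations above are cut off by the diff context. A plausible completion, following the cell-matrix/cell-residual pattern that the surrounding text describes (argument names are assumed, not taken from the source):

    template <int dim>
    void
    cell_matrix(FullMatrix<double>      &M,
                const FEValuesBase<dim> &fe,
                const double             factor = 1.);

    template <int dim>
    void
    cell_residual(Vector<double>                    &result,
                  const FEValuesBase<dim>           &fe,
                  const std::vector<Tensor<1, dim>> &input,
                  const double                       factor = 1.);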
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Divergence.html differs (HTML document, ASCII text, with very long lines)

    Cell matrix for divergence. The derivative is on the trial function.

    \[ \int_Z v\nabla \cdot \mathbf u \,dx \]

    This is the strong divergence operator and the trial space should be at least Hdiv. The test functions may be discontinuous.
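
    A usage sketch: the two-FEValues call (vector-valued trial space first, scalar test space second) is assumed from the LocalIntegrators conventions rather than confirmed by this diff:

    #include <deal.II/fe/fe_values.h>
    #include <deal.II/integrators/divergence.h>
    #include <deal.II/lac/full_matrix.h>

    using namespace dealii;

    // Sketch: local divergence matrix on one cell.
    void local_divergence(const FEValuesBase<2> &fe_v,    // vector-valued u
                          const FEValuesBase<2> &fe_test, // scalar v
                          FullMatrix<double>    &M)
    {
      LocalIntegrators::Divergence::cell_matrix(M, fe_v, fe_test);
    }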


    The residual of the divergence operator in strong form.

    \[ \int_Z v\nabla \cdot \mathbf u \,dx \]

    This is the strong divergence operator and the trial space should be at least Hdiv. The test functions may be discontinuous.

    The function cell_matrix() is the Frechet derivative of this function with respect to the test functions.


    The residual of the divergence operator in weak form.

    \[ - \int_Z \nabla v \cdot \mathbf u \,dx \]

    This is the weak divergence operator and the test space should be at least H1. The trial functions may be discontinuous.

    Todo
    Verify: The function cell_matrix() is the Frechet derivative of this function with respect to the test functions.

    Cell matrix for gradient. The derivative is on the trial function.

    \[ \int_Z \nabla u \cdot \mathbf v\,dx \]

    This is the strong gradient and the trial space should be at least in H1. The test functions can be discontinuous.


    The residual of the gradient operator in strong form.

    \[ \int_Z \mathbf v\cdot\nabla u \,dx \]

    This is the strong gradient operator and the trial space should be at least H1. The test functions may be discontinuous.

    The function gradient_matrix() is the Frechet derivative of this function with respect to the test functions.


    The residual of the gradient operator in weak form.

    \[ -\int_Z \nabla\cdot \mathbf v u \,dx \]

    This is the weak gradient operator and the test space should be at least Hdiv. The trial functions may be discontinuous.

    Todo
    Verify: The function gradient_matrix() is the Frechet derivative of this function with respect to the test functions.

    The trace of the divergence operator, namely the product of the normal component of the vector valued trial space and the test space.

    \[ \int_F (\mathbf u\cdot \mathbf n) v \,ds \]

    Definition at line 258 of file divergence.h.


    The trace of the divergence operator, namely the product of the normal component of the vector valued trial space and the test space.

\[
 \int_F (\mathbf u\cdot \mathbf n) v \,ds
\]

    Definition at line 291 of file divergence.h.


    The trace of the gradient operator, namely the product of the normal component of the vector valued test space and the trial space.

\[
 \int_F u (\mathbf v\cdot \mathbf n) \,ds
\]

    Definition at line 323 of file divergence.h.


    The trace of the divergence operator, namely the product of the jump of the normal component of the vector valued trial function and the mean value of the test function.

\[
 \int_F (\mathbf u_1\cdot \mathbf n_1 + \mathbf u_2 \cdot \mathbf n_2)
 \frac{v_1+v_2}{2} \,ds
\]

    Definition at line 357 of file divergence.h.


    The jump of the normal component

\[
 \int_F
  (\mathbf u_1\cdot \mathbf n_1 + \mathbf u_2 \cdot \mathbf n_2)
  (\mathbf v_1\cdot \mathbf n_1 + \mathbf v_2 \cdot \mathbf n_2)
 \,ds
\]

    Definition at line 416 of file divergence.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Elasticity.html differs (HTML document, ASCII text, with very long lines)

    The linear elasticity operator in weak form, namely double contraction of symmetric gradients.

    \[ \int_Z \varepsilon(u): \varepsilon(v)\,dx \]

    Definition at line 50 of file elasticity.h.


    Vector-valued residual operator for linear elasticity in weak form

    \[ - \int_Z \varepsilon(u): \varepsilon(v) \,dx \]

    Definition at line 83 of file elasticity.h.


    The matrix for the weak boundary condition of Nitsche type for linear elasticity:

\[
 \int_F \Bigl(\gamma u \cdot v - n^T \epsilon(u) v - u \epsilon(v)
 n\Bigr)\;ds.
\]

    Definition at line 122 of file elasticity.h.


    The matrix for the weak boundary condition of Nitsche type for the tangential displacement in linear elasticity:

\[
 \int_F \Bigl(\gamma u_\tau \cdot v_\tau - n^T \epsilon(u_\tau) v_\tau -
 u_\tau^T \epsilon(v_\tau) n\Bigr)\;ds.
\]

    Definition at line 177 of file elasticity.h.


    Weak boundary condition for the elasticity operator by Nitsche, namely on the face F the vector

\[
 \int_F \Bigl(\gamma (u-g) \cdot v - n^T \epsilon(u) v - (u-g) \epsilon(v)
 n^T\Bigr)\;ds.
\]

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $n$ is the outer normal vector and $\gamma$ is the usual penalty parameter.

    Definition at line 256 of file elasticity.h.


    The weak boundary condition of Nitsche type for the tangential displacement in linear elasticity:

\[
 \int_F \Bigl(\gamma (u_\tau-g_\tau) \cdot v_\tau - n^T \epsilon(u_\tau) v
 - (u_\tau-g_\tau) \epsilon(v_\tau) n\Bigr)\;ds.
\]

    Definition at line 308 of file elasticity.h.


    Homogeneous weak boundary condition for the elasticity operator by Nitsche, namely on the face F the vector

\[
 \int_F \Bigl(\gamma u \cdot v - n^T \epsilon(u) v - u \epsilon(v)
 n^T\Bigr)\;ds.
\]

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. $n$ is the outer normal vector and $\gamma$ is the usual penalty parameter.

    Definition at line 386 of file elasticity.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1GradDiv.html differs (HTML document, ASCII text, with very long lines)

    The weak form of the grad-div operator penalizing volume changes

\[
  \int_Z \nabla\cdot u \nabla \cdot v \,dx
\]

    Definition at line 51 of file grad_div.h.


    The weak form of the grad-div residual

\[
  \int_Z \nabla\cdot u \nabla \cdot v \,dx
\]

    Definition at line 85 of file grad_div.h.


    The matrix for the weak boundary condition of Nitsche type for linear elasticity:

\[
 \int_F \Bigl(\gamma (u \cdot n)(v \cdot n)  - \nabla\cdot u
 v\cdot n - u \cdot n \nabla \cdot v \Bigr)\;ds.
\]

    Definition at line 121 of file grad_div.h.


    Weak boundary condition for the Laplace operator by Nitsche, vector valued version, namely on the face F the vector

\[
 \int_F \Bigl(\gamma (\mathbf u \cdot \mathbf n- \mathbf g \cdot
 \mathbf n) (\mathbf v \cdot \mathbf n)
 - \nabla \cdot \mathbf u (\mathbf v \cdot \mathbf n)
 - (\mathbf u-\mathbf g) \cdot \mathbf n \nabla \cdot v\Bigr)\;ds.
\]

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

    Definition at line 173 of file grad_div.h.


    Grad-div residual term for the symmetric interior penalty method:

\[
 \int_F \Bigl( \gamma [\mathbf u \cdot\mathbf n]
 \cdot[\mathbf v \cdot \mathbf n]
 - \{\nabla \cdot \mathbf u\}[\mathbf v\cdot \mathbf n]
 - [\mathbf u\times \mathbf n]\{\nabla\cdot \mathbf v\} \Bigr) \; ds.
\]

    See for instance Hansbo and Larson, 2002

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1L2.html differs (HTML document, ASCII text, with very long lines)
    result    The vector obtained as result.
    fe        The FEValues object describing the local trial function space. update_values and update_JxW_values must be set.
    input     The representation of $f$ evaluated at the quadrature points in the finite element (size must be equal to the number of quadrature points in the element).
    factor    A constant that multiplies the result.

    The jump matrix between two cells for scalar or vector-valued finite elements. Note that the factor $\gamma$ can be used to implement weighted jumps.

    \[ \int_F [\gamma u][\gamma v]\,ds \quad \text{or}
 \int_F [\gamma \mathbf u]\cdot [\gamma \mathbf v]\,ds \]
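
    A sketch of assembling the four face blocks of this jump matrix; the M11/M12/M21/M22 argument order is an assumption:

    #include <deal.II/fe/fe_values.h>
    #include <deal.II/integrators/l2.h>
    #include <deal.II/lac/full_matrix.h>

    using namespace dealii;

    // Sketch: jump matrix blocks on an interior face between two cells.
    void local_jump(const FEValuesBase<2> &fe1,
                    const FEValuesBase<2> &fe2,
                    FullMatrix<double>    &M11,
                    FullMatrix<double>    &M12,
                    FullMatrix<double>    &M21,
                    FullMatrix<double>    &M22)
    {
      LocalIntegrators::L2::jump_matrix(M11, M12, M21, M22, fe1, fe2);
    }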

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Laplace.html differs (HTML document, ASCII text, with very long lines)

    Laplacian in weak form, namely on the cell Z the matrix

    \[ \int_Z \nu \nabla u \cdot \nabla v \, dx. \]

    The FiniteElement in fe may be scalar or vector valued. In the latter case, the Laplacian is applied to each component separately.


    Laplacian residual operator in weak form

    \[ \int_Z \nu \nabla u \cdot \nabla v \, dx. \]

    Definition at line 91 of file laplace.h.


    Vector-valued Laplacian residual operator in weak form

    \[ \int_Z \nu \nabla u : \nabla v \, dx. \]

    Definition at line 118 of file laplace.h.


    Weak boundary condition of Nitsche type for the Laplacian, namely on the face F the matrix

\[
 \int_F \Bigl(\gamma u v - \partial_n u v - u \partial_n v\Bigr)\;ds.
\]

    Here, $\gamma$ is the penalty parameter suitably computed with compute_penalty().

    Definition at line 156 of file laplace.h.
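
    A sketch for one boundary face; the (matrix, face values, penalty) signature is assumed, with the penalty normally supplied by compute_penalty():

    #include <deal.II/fe/fe_values.h>
    #include <deal.II/integrators/laplace.h>
    #include <deal.II/lac/full_matrix.h>

    using namespace dealii;

    // Sketch: Nitsche boundary matrix on a single face.
    void local_nitsche(const FEFaceValuesBase<2> &fe_face,
                       FullMatrix<double>        &M,
                       const double               penalty)
    {
      LocalIntegrators::Laplace::nitsche_matrix(M, fe_face, penalty);
    }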


    Weak boundary condition of Nitsche type for the Laplacian applied to the tangential component only, namely on the face F the matrix

\[
 \int_F \Bigl(\gamma u_\tau v_\tau - \partial_n u_\tau v_\tau - u_\tau
 \partial_n v_\tau\Bigr)\;ds.
\]

    Here, $\gamma$ is the penalty parameter suitably computed with compute_penalty().

    Definition at line 197 of file laplace.h.


    Weak boundary condition for the Laplace operator by Nitsche, scalar version, namely on the face F the vector

\[
 \int_F \Bigl(\gamma (u-g) v - \partial_n u v - (u-g) \partial_n
 v\Bigr)\;ds.
\]

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

    Definition at line 260 of file laplace.h.


    Weak boundary condition for the Laplace operator by Nitsche, vector valued version, namely on the face F the vector

\[
 \int_F \Bigl(\gamma (\mathbf u- \mathbf g) \cdot \mathbf v
 - \partial_n \mathbf u \cdot \mathbf v
 - (\mathbf u-\mathbf g) \cdot \partial_n \mathbf v\Bigr)\;ds.
\]

    Here, u is the finite element function whose values and gradient are given in the arguments input and Dinput, respectively. g is the inhomogeneous boundary value in the argument data. $\gamma$ is the usual penalty parameter.

    Definition at line 307 of file laplace.h.


    Flux for the interior penalty method for the Laplacian, namely on the face F the matrices associated with the bilinear form

\[
 \int_F \Bigl( \gamma [u][v] - \{\nabla u\}[v\mathbf n] - [u\mathbf
 n]\{\nabla v\} \Bigr) \; ds.
\]

    The penalty parameter should always be the mean value of the penalties needed for stability on each side. In the case of constant coefficients, it can be computed using compute_penalty().

    If factor2 is missing or negative, the factor is assumed the same on both sides. If factors differ, note that the penalty parameter has to be computed accordingly.


    Flux for the interior penalty method for the Laplacian applied to the tangential components of a vector field, namely on the face F the matrices associated with the bilinear form

\[
 \int_F \Bigl( \gamma [u_\tau][v_\tau] - \{\nabla u_\tau\}[v_\tau\mathbf
 n] - [u_\tau\mathbf n]\{\nabla v_\tau\} \Bigr) \; ds.
\]

    Warning
    This function is still under development!

    Residual term for the symmetric interior penalty method:

\[
 \int_F \Bigl( \gamma [u][v] - \{\nabla u\}[v\mathbf n] - [u\mathbf
 n]\{\nabla v\} \Bigr) \; ds.
\]

    Definition at line 543 of file laplace.h.


    Vector-valued residual term for the symmetric interior penalty method:

\[
 \int_F \Bigl( \gamma [\mathbf u]\cdot[\mathbf v]
 - \{\nabla \mathbf u\}[\mathbf v\otimes \mathbf n]
 - [\mathbf u\otimes \mathbf n]\{\nabla \mathbf v\} \Bigr) \; ds.
\]

    Definition at line 610 of file laplace.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceLocalIntegrators_1_1Maxwell.html differs (HTML document, ASCII text, with very long lines)

    Local integrators related to curl operators and their traces.

    We use the following conventions for curl operators. First, in three space dimensions

\[
 \nabla\times \mathbf u = \begin{pmatrix}
   \partial_2 u_3 - \partial_3 u_2 \\
   \partial_3 u_1 - \partial_1 u_3 \\
   \partial_1 u_2 - \partial_2 u_1
 \end{pmatrix}.
\]

    In two space dimensions, the curl is obtained by extending a vector u to $(u_1, u_2, 0)^T$ and a scalar p to $(0,0,p)^T$. Computing the nonzero components, we obtain the scalar curl of a vector function and the vector curl of a scalar function. The current implementation exchanges the sign and we have:

\[
  \nabla \times \mathbf u = \partial_1 u_2 - \partial_2 u_1,
  \qquad
  \nabla \times p = \begin{pmatrix}
    \partial_2 p \\ -\partial_1 p
  \end{pmatrix}
\]

    Function Documentation


    Auxiliary function. Given the tensors of dim second derivatives, compute the curl of the curl of a vector function. The result in two and three dimensions is:

\[
 \nabla\times\nabla\times \mathbf u = \begin{pmatrix}
 \partial_1\partial_2 u_2 - \partial_2^2 u_1 \\
 \partial_1\partial_2 u_1 - \partial_1^2 u_2
 \end{pmatrix}
\]

    and

\[
 \nabla\times\nabla\times \mathbf u = \begin{pmatrix}
 \partial_1\partial_2 u_2 + \partial_1\partial_3 u_3
 - (\partial_2^2+\partial_3^2) u_1 \\
 \partial_2\partial_3 u_3 + \partial_2\partial_1 u_1
 - (\partial_3^2+\partial_1^2) u_2 \\
 \partial_3\partial_1 u_1 + \partial_3\partial_2 u_2
 - (\partial_1^2+\partial_2^2) u_3
 \end{pmatrix}.
\]

    Note
    The third tensor argument is not used in two dimensions and can for instance duplicate one of the previous.

    Auxiliary function. Given dim tensors of first derivatives and a normal vector, compute the tangential curl

\[
 \mathbf n \times \nabla \times u.
\]

    Note
    The third tensor argument is not used in two dimensions and can for instance duplicate one of the previous.

    The curl-curl operator

\[
 \int_Z \nabla\times u \cdot
 \nabla \times v \,dx
\]

    in weak form.
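
    A usage sketch; the single-FEValues signature is assumed:

    #include <deal.II/fe/fe_values.h>
    #include <deal.II/integrators/maxwell.h>
    #include <deal.II/lac/full_matrix.h>

    using namespace dealii;

    // Sketch: local curl-curl matrix for a vector-valued (e.g. Nedelec) element.
    void local_curl_curl(const FEValuesBase<3> &fe_v, FullMatrix<double> &M)
    {
      LocalIntegrators::Maxwell::curl_curl_matrix(M, fe_v);
    }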


    The matrix for the curl operator

\[
 \int_Z \nabla \times u \cdot v \,dx.
\]

    This is the standard curl operator in 3d and the scalar curl in 2d. The vector curl operator can be obtained by exchanging test and trial functions.


    The matrix for weak boundary condition of Nitsche type for the tangential component in Maxwell systems.

\[
 \int_F \biggl( 2\gamma (u\times n) (v\times n)
 - (u\times n)(\nu \nabla\times v)
 - (v\times n)(\nu \nabla\times u) \biggr)
\]

    Definition at line 266 of file maxwell.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching.html differs (HTML document, ASCII text, with very long lines)
    Type describing how a cell or a face is located relative to the zero contour of a level set function, $\psi$. The values of the type correspond to:

    inside if $\psi(x) < 0$, outside if $\psi(x) > 0$, intersected if $\psi(x)$ varies in sign,

    over the cell/face. The value "unassigned" is used to describe that the location of a cell/face has not yet been determined.


    Create a coupling sparsity pattern for non-matching, overlapping grids.

    Given two non-matching triangulations, representing the domains $\Omega$ and $B$, with $B \subseteq \Omega$, and two finite element spaces $V(\Omega) = \text{span}\{v_i\}_{i=0}^n$ and $Q(B) = \text{span}\{w_j\}_{j=0}^m$, compute the sparsity pattern that would be necessary to assemble the matrix

\[
 M_{ij} \dealcoloneq \int_{B} v_i(x) w_j(x) dx,
                     \quad i \in [0,n), j \in [0,m),
\]

    where $V(\Omega)$ is the finite element space associated with the space_dh passed to this function (or part of it, if specified in space_comps), while $Q(B)$ is the finite element space associated with the immersed_dh passed to this function (or part of it, if specified in immersed_comps).

    The sparsity is filled by locating the position of quadrature points (obtained by the reference quadrature quad) defined on elements of $B$ with respect to the embedding triangulation $\Omega$. For each overlapping cell, the entries corresponding to space_comps in space_dh and immersed_comps in immersed_dh are added to the sparsity pattern.

    The space_comps and immersed_comps masks are assumed to be ordered in the same way: the first component of space_comps will couple with the first component of immersed_comps, the second with the second, and so on. If one of the two masks has more non-zero than the other, then the excess components will be ignored.

    If the domain $B$ does not fall within $\Omega$, an exception will be thrown by the algorithm that computes the quadrature point locations. In particular, notice that this function only makes sense for dim1 less than or equal to dim0. A static assert guards that this is actually the case.

    For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

    This function will also work in parallel, provided that the immersed triangulation is of type parallel::shared::Triangulation<dim1,spacedim>. An exception is thrown if you use an immersed parallel::distributed::Triangulation<dim1,spacedim>.

    See the tutorial program step-60 for an example on how to use this function.


    Create a coupling mass matrix for non-matching, overlapping grids.

    Given two non-matching triangulations, representing the domains $\Omega$ and $B$, with $B \subseteq \Omega$, and two finite element spaces $V(\Omega) = \text{span}\{v_i\}_{i=0}^n$ and $Q(B) = \text{span}\{w_j\}_{j=0}^m$, compute the coupling matrix

\[
 M_{ij} \dealcoloneq \int_{B} v_i(x) w_j(x) dx,
                     \quad i \in [0,n), j \in [0,m),
\]

    where $V(\Omega)$ is the finite element space associated with the space_dh passed to this function (or part of it, if specified in space_comps), while $Q(B)$ is the finite element space associated with the immersed_dh passed to this function (or part of it, if specified in immersed_comps).

    The corresponding sparsity patterns can be computed by calling the make_coupling_sparsity_pattern function. The elements of the matrix are computed by locating the position of quadrature points defined on elements of $B$ with respect to the embedding triangulation $\Omega$.

    The space_comps and immersed_comps masks are assumed to be ordered in the same way: the first component of space_comps will couple with the first component of immersed_comps, the second with the second, and so on. If one of the two masks has more non-zero entries than the other, then the excess components will be ignored.

    If the domain $B$ does not fall within $\Omega$, an exception will be thrown by the algorithm that computes the quadrature point locations. In particular, notice that this function only makes sense for dim1 less than or equal to dim0. A static assert guards that this is actually the case.

    For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

    This function will also work in parallel, provided that the immersed triangulation is of type parallel::shared::Triangulation<dim1,spacedim>. An exception is thrown if you use an immersed parallel::distributed::Triangulation<dim1,spacedim>.

    See the tutorial program step-60 for an example on how to use this function.
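
    A sketch in the spirit of step-60, coupling an immersed 1d mesh into a 2d background space; setup of the handlers, sparsity pattern, and constraints is omitted, and the trailing default arguments (component masks, mappings) are left at their defaults:

    #include <deal.II/base/quadrature_lib.h>
    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/lac/affine_constraints.h>
    #include <deal.II/lac/sparse_matrix.h>
    #include <deal.II/non_matching/coupling.h>

    using namespace dealii;

    // Sketch: assemble M_ij = \int_B v_i w_j dx over the immersed domain B.
    void couple(const DoFHandler<2, 2>          &space_dh,
                const DoFHandler<1, 2>          &immersed_dh,
                SparseMatrix<double>            &coupling_matrix,
                const AffineConstraints<double> &constraints)
    {
      const QGauss<1> quad(3); // quadrature on the immersed cells
      NonMatching::create_coupling_mass_matrix(
        space_dh, immersed_dh, quad, coupling_matrix, constraints);
    }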


    Create a coupling sparsity pattern for non-matching independent grids, using a convolution kernel with compact support of radius epsilon.

    Given two non-matching triangulations, representing the domains $\Omega^0$ and $\Omega^1$, both embedded in $\mathbb{R}^d$, and two finite element spaces $V^0(\Omega^0) = \text{span}\{v_i\}_{i=0}^n$ and $V^1(\Omega^1) = \text{span}\{w_\alpha\}_{\alpha=0}^m$, compute the sparsity pattern that would be necessary to assemble the matrix

\[
 M_{i\alpha} \dealcoloneq \int_{\Omega^0} \int_{\Omega^1}
 v_i(x) K^{\epsilon}(x-y) w_\alpha(y) dx \ dy,
 \quad i \in [0,n), \alpha \in [0,m),
\]

    where $V^0(\Omega^0)$ is the finite element space associated with the dh0 passed to this function (or part of it, if specified in comps0), while $V^1(\Omega^1)$ is the finite element space associated with the dh1 passed to this function (or part of it, if specified in comps1), and $K^\epsilon$ is a function derived from CutOffFunctionBase with compact support included in a ball of radius $\epsilon$.

    The comps0 and comps1 masks are assumed to be ordered in the same way: the first component of comps0 will couple with the first component of comps1, the second with the second, and so on. If one of the two masks has more active components than the other, then the excess components will be ignored.

    For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

    This function will also work in parallel, provided that at least one of the triangulations is of type parallel::shared::Triangulation<dim1,spacedim>. An exception is thrown if both triangulations are of type parallel::distributed::Triangulation<dim1,spacedim>.


    Create a coupling mass matrix for non-matching independent grids, using a convolution kernel with compact support.

    Given two non-matching triangulations, representing the domains $\Omega^0$ and $\Omega^1$, both embedded in $\mathbb{R}^d$, and two finite element spaces $V^0(\Omega^0) = \text{span}\{v_i\}_{i=0}^n$ and $V^1(\Omega^1) = \text{span}\{w_\alpha\}_{\alpha=0}^m$, compute the matrix

\[
 M_{i\alpha} \dealcoloneq \int_{\Omega^0} \int_{\Omega^1}
 v_i(x) K^{\epsilon}(x-y) w_\alpha(y) dx \ dy,
 \quad i \in [0,n), \alpha \in [0,m),
\]

    where $V^0(\Omega^0)$ is the finite element space associated with the dh0 passed to this function (or part of it, if specified in comps0), while $V^1(\Omega^1)$ is the finite element space associated with the dh1 passed to this function (or part of it, if specified in comps1), and $K^\epsilon$ is a function derived from CutOffFunctionBase with compact support included in a ball of radius $\epsilon$.

    The corresponding sparsity patterns can be computed by calling the make_coupling_sparsity_pattern() function.

    The comps0 and comps1 masks are assumed to be ordered in the same way: the first component of comps0 will couple with the first component of comps1, the second with the second, and so on. If one of the two masks has more active components than the other, then the excess components will be ignored.

    For both spaces, it is possible to specify a custom Mapping, which defaults to StaticMappingQ1 for both.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceNonMatching_1_1internal_1_1QuadratureGeneratorImplementation.html differs (HTML document, ASCII text, with very long lines)
    Returns the max/min bounds on the value, taken over all the entries in the incoming vector of FunctionBounds. That is, given the incoming function bounds, $[L_j, U_j]$, this function returns $[L, U]$, where $L = \min_{j} L_j$ and $U = \max_{j} U_j$.

    Definition at line 202 of file quadrature_generator.cc.

    Finds the best choice of height function direction, given the FunctionBounds for a number of functions $\{\psi_j\}_{j=0}^{n-1}$. Here, "best" is meant in the sense of the implicit function theorem.

    Let $J_I$ be the index set of the indefinite functions:

    $J_I = \{0,..., n - 1\} \setminus \{ j : |\psi_j| > 0 \}$.

    This function converts the incoming bounds to a lower bound, $L_{jk}$, on the absolute value of each component of the gradient:

    $|\partial_k \psi_j| > L_{jk}$,

    and then returns a coordinate direction, $i$, and a lower bound $L$, such that

\[
 i = \arg \max_{k} \min_{j \in J_I} L_{jk}, \\
 L =      \max_{k} \min_{j \in J_I} L_{jk}.
\]

    This means $i$ is a coordinate direction such that all functions intersected by the zero contour (i.e. those belonging to $J_I$) fulfill

    $|\partial_i \psi_j| > L$.

    Note that the estimated lower bound, $L$, can be zero or negative. This means that no suitable height function direction exists. If all of the incoming functions are positive or negative definite, the returned std::optional is not set.

    Definition at line 276 of file quadrature_generator.cc.

    Given the incoming lower and upper bounds on the value of a function $[L, U]$, return the minimum/maximum of $[L, U]$ and the function values at the vertices. That is, this function returns

    $[\min(L, L_f), \max(U, U_f)]$,

    where $L_f = \min_{v} f(x_v)$, $U_f = \max_{v} f(x_v)$, and $x_v$ is a vertex.

    It is assumed that the incoming function is scalar valued.

    Return a lower bound, $L_a$, on the absolute value of a function, $f(x)$:

    $L_a \leq |f(x)|$,

    by estimating it from the incoming lower and upper bounds: $L \leq f(x) \leq U$.

    By rewriting the lower and upper bounds as $F - C \leq f(x) \leq F + C$, where $L = F - C$, $U = F + C$ (or $F = (U + L)/2$, $C = (U - L)/2$), we get $|f(x) - F| \leq C$. Using the inverse triangle inequality gives $|F| - |f(x)| \leq |f(x) - F| \leq C$. Thus, $L_a = |F| - C$.
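
    The derivation transcribes directly into code; the helper name is illustrative:

    #include <cmath>

    // Given L <= f(x) <= U, write F = (U + L)/2 and C = (U - L)/2 and return
    // L_a = |F| - C, which satisfies L_a <= |f(x)| and may be zero or negative.
    double lower_bound_on_abs(const double L, const double U)
    {
      const double F = 0.5 * (U + L);
      const double C = 0.5 * (U - L);
      return std::abs(F) - C;
    }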

    Let $\{ y_0, ..., y_{n+1} \}$ be such that $[y_0, y_{n+1}]$ is the interval and $\{ y_1, ..., y_n \}$ are the roots. In each subinterval, $[y_i, y_{i+1}]$, distribute points according to the 1D-quadrature rule $\{(x_q, w_q)\}_q$ (quadrature1D). Take the tensor product with the quadrature point $(x, w)$ (point, weight) to create dim-dimensional quadrature points

\[
 X_q = x_I \times (y_i + (y_{i+1} - y_i) x_q),
 W_q = w_I (y_{i+1} - y_i) w_q.
\]

    Return the coordinate direction that the box should be split in, assuming that the box should be split in half.

    If the box is larger in one coordinate direction, this direction is returned. If the box has the same extent in all directions, we choose the coordinate direction which is closest to being a height-function direction. That is, the direction $i$ that has the least negative estimate of $|\partial_i \psi_j|$. As a last resort, we choose the direction 0, if height_direction_data is not set.

    Definition at line 1018 of file quadrature_generator.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceParticles_1_1Utilities.html differs (HTML document, ASCII text, with very long lines)

    Create an interpolation sparsity pattern for particles.

    Given a triangulation representing the domain $\Omega$, a particle handler of particles in $\Omega$, and a scalar finite element space $V(\Omega) = \text{span}\{v_j\}_{j=0}^n$, compute the sparsity pattern that would be necessary to assemble the matrix

    \[
 M_{i,j} \dealcoloneq v_j(x_i) ,
 \]

    where $V(\Omega)$ is the finite element space associated with the space_dh, and the index i is given by the particle id whose position is x_i.

    In the case of vector valued finite element spaces, the components on which interpolation must be performed can be selected using a component mask. Only primitive finite element spaces are supported.

    When selecting more than one component, the resulting sparsity will have dimension equal to particle_handler.n_global_particles() * mask.n_selected_components() times space_dh.n_dofs(), and the corresponding matrix entries are given by

\[
 M_{(i \cdot n_\text{comps} + k),j} \dealcoloneq v_j(x_i) \cdot e_{comp_j} ,
 \]

    where comp_j is the only non-zero component of the vector valued basis function v_j (equal to fe.system_to_component_index(j).first), k corresponds to its index within the selected components of the mask, and $e_{comp_j}$ is the unit vector in the direction comp_j.

    The sparsity is filled by locating the position of the particle with index i within the particle handler with respect to the embedding triangulation $\Omega$, and coupling it with all the local degrees of freedom specified in the component mask space_comps, following the ordering in which they are selected in the mask space_comps.

    If a particle does not fall within $\Omega$, it is ignored, and the corresponding rows of the sparsity will be empty.

    Constraints of the form supported by the AffineConstraints class may be supplied with the constraints argument. The method AffineConstraints::add_entries_local_to_global() is used to fill the final sparsity pattern.

    Definition at line 31 of file utilities.cc.


    Create an interpolation matrix for particles.

    Given a triangulation representing the domain $\Omega$, a particle handler of particles in $\Omega$, and a scalar finite element space $V(\Omega) = \text{span}\{v_j\}_{j=0}^n$, compute the matrix

    \[
 M_{ij} \dealcoloneq v_j(x_i) ,
 \]

    where $V(\Omega)$ is the finite element space associated with the space_dh, and the index i is given by the particle id whose position is x_i.

    In the case of vector valued finite element spaces, the components on which interpolation must be performed can be selected using a component mask. Only primitive finite element spaces are supported.

    When selecting more than one component, the resulting sparsity will have dimension equal to particle_handler.n_global_particles() * mask.n_selected_components() times space_dh.n_dofs(), and the corresponding matrix entries are given by

\[
 M_{(i \cdot n_\text{comps} + k),j} \dealcoloneq v_j(x_i) \cdot e_{comp_j} ,
 \]

    where comp_j is the only non-zero component of the vector valued basis function v_j (equal to fe.system_to_component_index(j).first), k corresponds to its index within the selected components of the mask, and $e_{comp_j}$ is the unit vector in the direction comp_j.

    The matrix is filled by locating the position of the particle with index i within the particle handler with respect to the embedding triangulation $\Omega$, and coupling it with all the local degrees of freedom specified in the component mask space_comps, following the ordering in which they are selected in the mask space_comps.

    If a particle does not fall within $\Omega$, it is ignored, and the corresponding rows of the matrix will be zero.

    Constraints of the form supported by the AffineConstraints class may be supplied with the constraints argument. The method AffineConstraints::distribute_local_to_global() is used to distribute the entries of the matrix to respect the given constraints.

    Definition at line 113 of file utilities.cc.
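
    A usage sketch; the (space_dh, particle_handler, matrix, constraints) argument order is assumed from the description above:

    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/lac/affine_constraints.h>
    #include <deal.II/lac/sparse_matrix.h>
    #include <deal.II/particles/particle_handler.h>
    #include <deal.II/particles/utilities.h>

    using namespace dealii;

    // Sketch: interpolation matrix with M_ij = v_j(x_i) for particles x_i.
    template <int dim>
    void build_particle_interpolation(
      const DoFHandler<dim>                 &space_dh,
      const Particles::ParticleHandler<dim> &particle_handler,
      SparseMatrix<double>                  &matrix,
      const AffineConstraints<double>       &constraints)
    {
      Particles::Utilities::create_interpolation_matrix(
        space_dh, particle_handler, matrix, constraints);
    }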

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Notation_1_1Kelvin.html differs (HTML document, ASCII text, with very long lines)

    where $n$ denotes the Kelvin index for the tensor component, while for a general rank-2 tensor $\mathbf{T}$

\[
 \mathbf{T} \dealcoloneq \left[ \begin{array}{ccc}
  T_{00} & T_{01} & T_{02} \\
  T_{10} & T_{11} & T_{12} \\
  T_{20} & T_{21} & T_{22}
 \end{array} \right] .
\]
    the matrix mtrx_1 will have $dim \times dim$ rows and $dim$ columns (i.e. size Tensor<2,dim>::n_independent_components $\times$ Tensor<1,dim>::n_independent_components), while those of the matrix mtrx_2 will have $dim$ rows and $(dim \times dim + dim)/2$ columns (i.e. size Tensor<1,dim>::n_independent_components $\times$ SymmetricTensor<2,dim>::n_independent_components), as it is assumed that the entries corresponding to the alternation of the second and third indices are equal. That is to say that r3_symm_tnsr[i][j][k] == r3_symm_tnsr[i][k][j].
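
    A small sketch of Kelvin flattening for the simpler symmetric rank-2 and rank-4 cases; the to_vector()/to_matrix() names are assumed from this namespace's interface:

    #include <deal.II/base/symmetric_tensor.h>
    #include <deal.II/lac/full_matrix.h>
    #include <deal.II/lac/vector.h>
    #include <deal.II/physics/notation.h>

    using namespace dealii;

    // Sketch: flatten tensors to Kelvin form; sizes match
    // n_independent_components (6 and 6x6 for dim = 3).
    void kelvin_flatten()
    {
      const SymmetricTensor<2, 3> S = unit_symmetric_tensor<3>();
      const SymmetricTensor<4, 3> C = identity_tensor<3>();

      const Vector<double>     s = Physics::Notation::Kelvin::to_vector(S);
      const FullMatrix<double> M = Physics::Notation::Kelvin::to_matrix(C);
    }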

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations.html differs (HTML document, ASCII text, with very long lines)

    Detailed Description

    A collection of operations to assist in the transformation of tensor quantities from the reference to spatial configuration, and vice versa. These types of transformation are typically used to re-express quantities measured or computed in one configuration in terms of a second configuration.

    Notation

    We will use the same notation for the coordinates $\mathbf{X}, \mathbf{x}$, transformations $\varphi$, differential operator $\nabla_{0}$ and deformation gradient $\mathbf{F}$ as discussed for namespace Physics::Elasticity.

    As a further point on notation, we will follow Holzapfel (2007) and denote the push forward transformation as $\chi\left(\bullet\right)$ and the pull back transformation as $\chi^{-1}\left(\bullet\right)$. We will also use the annotation $\left(\bullet\right)^{\sharp}$ to indicate that a tensor $\left(\bullet\right)$ is a contravariant tensor, and $\left(\bullet\right)^{\flat}$ that it is covariant. In other words, these indices do not actually change the tensor, they just indicate the kind of object a particular tensor is.

    Note
    For these transformations, unless otherwise stated, we will strictly assume that all indices of the transformed tensors derive from one coordinate system; that is to say that they are not multi-point tensors (such as the Piola stress in elasticity).

    Function Documentation


    Return the result of applying Nanson's formula for the transformation of the material surface area element $d\mathbf{A}$ to the current surface area element $d\mathbf{a}$ under the nonlinear transformation map $\mathbf{x} = \boldsymbol{\varphi} \left( \mathbf{X} \right)$.

    The returned result is the spatial normal scaled by the ratio of areas between the reference and spatial surface elements, i.e.

    \[
      \mathbf{n} \frac{da}{dA}
      \dealcoloneq \textrm{det} \mathbf{F} \, \mathbf{F}^{-T} \cdot \mathbf{N}
      = \textrm{cof} \mathbf{F} \cdot \mathbf{N} \, .
    \]

    Parameters
        [in] N  The referential normal unit vector $\mathbf{N}$
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

    Returns
        The scaled spatial normal vector $\mathbf{n} \frac{da}{dA}$
    Note
    For a discussion of the background of this function, see G. A. Holzapfel: "Nonlinear solid mechanics. A Continuum Approach for Engineering" (2007), and in particular formula (2.55) on p. 75 (or thereabouts).
    For a discussion of the background of this function, see P. Wriggers: "Nonlinear finite element methods" (2008), and in particular formula (3.11) on p. 23 (or thereabouts).
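    For illustration, a minimal sketch of calling this function; the header path and the concrete values of N and F are assumptions made for this example:

    #include <deal.II/base/tensor.h>
    #include <deal.II/physics/transformations.h>

    #include <iostream>

    using namespace dealii;

    int main()
    {
      constexpr int dim = 3;

      // Deformation gradient of a 10% uniaxial stretch in the x-direction.
      Tensor<2, dim> F;
      F[0][0] = 1.1;
      F[1][1] = 1.0;
      F[2][2] = 1.0;

      // Referential unit normal pointing in the x-direction.
      Tensor<1, dim> N;
      N[0] = 1.0;

      // n da/dA = det(F) F^{-T} . N = cof(F) . N; here det(F) = 1.1 and
      // F^{-T} scales the x-component by 1/1.1, so the result equals N.
      const Tensor<1, dim> n_da_dA =
        Physics::Transformations::nansons_formula(N, F);

      std::cout << n_da_dA << std::endl;
    }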
    ◆ basis_transformation() [1/5]

    Return a vector with a changed basis, i.e.

    \[
      \mathbf{V}^{\prime} \dealcoloneq \mathbf{B} \cdot \mathbf{V}
    \]

    Parameters
        [in] V  The vector to be transformed $\mathbf{V}$
        [in] B  The transformation matrix $\mathbf{B}$

    Returns
        $\mathbf{V}^{\prime}$
    ◆ basis_transformation() [2/5]

    Return a rank-2 tensor with a changed basis, i.e.

    \[
      \mathbf{T}^{\prime} \dealcoloneq \mathbf{B} \cdot \mathbf{T} \cdot
      \mathbf{B}^{T}
    \]

    Parameters
        [in] T  The tensor to be transformed $\mathbf{T}$
        [in] B  The transformation matrix $\mathbf{B}$

    Returns
        $\mathbf{T}^{\prime}$
    ◆ basis_transformation() [3/5]

    Return a symmetric rank-2 tensor with a changed basis, i.e.

    \[
      \mathbf{T}^{\prime} \dealcoloneq \mathbf{B} \cdot \mathbf{T} \cdot
      \mathbf{B}^{T}
    \]

    Parameters
        [in] T  The tensor to be transformed $\mathbf{T}$
        [in] B  The transformation matrix $\mathbf{B}$

    Returns
        $\mathbf{T}^{\prime}$
    ◆ basis_transformation() [4/5]

    Return a rank-4 tensor with a changed basis, i.e. (in index notation):

    \[
      H_{ijkl}^{\prime} \dealcoloneq B_{iI} B_{jJ} H_{IJKL} B_{kK} B_{lL}
    \]

    Parameters
        [in] H  The tensor to be transformed $\mathbf{H}$
        [in] B  The transformation matrix $\mathbf{B}$

    Returns
        $\mathbf{H}^{\prime}$
    ◆ basis_transformation() [5/5]

    Return a symmetric rank-4 tensor with a changed basis, i.e. (in index notation):

    \[
      H_{ijkl}^{\prime} \dealcoloneq B_{iI} B_{jJ} H_{IJKL} B_{kK} B_{lL}
    \]

    Parameters
        [in] H  The tensor to be transformed $\mathbf{H}$
        [in] B  The transformation matrix $\mathbf{B}$

    Returns
        $\mathbf{H}^{\prime}$
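    As a sketch of the basis_transformation() overloads above, here using a 2d rotation built with Physics::Transformations::Rotations::rotation_matrix_2d() as the transformation matrix B (the concrete values are illustrative):

    #include <deal.II/base/numbers.h>
    #include <deal.II/base/tensor.h>
    #include <deal.II/physics/transformations.h>

    #include <iostream>

    using namespace dealii;

    int main()
    {
      constexpr int dim = 2;

      // A 90 degree rotation as the transformation matrix B.
      const Tensor<2, dim> B =
        Physics::Transformations::Rotations::rotation_matrix_2d(numbers::PI / 2.);

      // V' = B . V rotates (1, 0) into (0, 1).
      Tensor<1, dim> V;
      V[0] = 1.0;
      const Tensor<1, dim> V_prime =
        Physics::Transformations::basis_transformation(V, B);

      // T' = B . T . B^T re-expresses a diagonal tensor in the rotated basis.
      Tensor<2, dim> T;
      T[0][0] = 2.0;
      T[1][1] = 3.0;
      const Tensor<2, dim> T_prime =
        Physics::Transformations::basis_transformation(T, B);

      std::cout << V_prime << std::endl << T_prime << std::endl;
    }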
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Contravariant.html differs (HTML document, ASCII text, with very long lines)

    Detailed Description

    Transformation of tensors that are defined in terms of a set of contravariant bases. Rank-1 and rank-2 contravariant tensors $\left(\bullet\right)^{\sharp} = \mathbf{T}$ (and its spatial counterpart $\mathbf{t}$) typically satisfy the relation

    \[
      \int_{V_{0}} \nabla_{0} \cdot \mathbf{T} \; dV
        = \int_{\partial V_{0}} \mathbf{T} \cdot \mathbf{N} \; dA
        = \int_{\partial V_{t}} \mathbf{T} \cdot \mathbf{n} \; da
        = \int_{V_{t}} \nabla \cdot \mathbf{t} \; dv
    \]

    where $V_{0}$ and $V_{t}$ are respectively control volumes in the reference and spatial configurations, and their surfaces $\partial V_{0}$ and $\partial V_{t}$ have the outwards facing normals $\mathbf{N}$ and $\mathbf{n}$.

    Function Documentation

    ◆ push_forward() [1/5]

    Parameters
        [in] V  The (referential) vector to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$
    ◆ push_forward() [2/5]
    Parameters
        [in] T  The (referential) rank-2 tensor to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$
    ◆ push_forward() [3/5]
    Parameters
        [in] T  The (referential) rank-2 symmetric tensor to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$
    ◆ push_forward() [4/5]
    Parameters
        [in] H  The (referential) rank-4 tensor to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$
    ◆ push_forward() [5/5]
    Parameters
        [in] H  The (referential) rank-4 symmetric tensor to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$
    ◆ pull_back() [1/5]
    Parameters
        [in] v  The (spatial) vector to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$
    ◆ pull_back() [2/5]

    Return the result of the pull back transformation on a rank-2 contravariant tensor, i.e.

    \[
      \chi^{-1}\left(\bullet\right)^{\sharp}
        \dealcoloneq \mathbf{F}^{-1} \cdot \left(\bullet\right)^{\sharp}
        \cdot \mathbf{F}^{-T}
    \]

    Parameters
        [in] t  The (spatial) tensor to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

    Returns
        $\chi^{-1}\left( \mathbf{t} \right)$
    ◆ pull_back() [3/5]

    Return the result of the pull back transformation on a rank-2 contravariant symmetric tensor, i.e.

    \[
      \chi^{-1}\left(\bullet\right)^{\sharp}
        \dealcoloneq \mathbf{F}^{-1} \cdot \left(\bullet\right)^{\sharp}
        \cdot \mathbf{F}^{-T}
    \]

    Parameters
        [in] t  The (spatial) symmetric tensor to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

    Returns
        $\chi^{-1}\left( \mathbf{t} \right)$
    ◆ pull_back() [4/5]

    Return the result of the pull back transformation on a rank-4 contravariant tensor, i.e. (in index notation):

    \[
      \left[ \chi^{-1}\left(\bullet\right)^{\sharp} \right]_{IJKL}
        \dealcoloneq F^{-1}_{Ii} F^{-1}_{Jj}
        \left(\bullet\right)^{\sharp}_{ijkl} F^{-1}_{Kk} F^{-1}_{Ll}
    \]

    Parameters
        [in] h  The (spatial) tensor to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

    Returns
        $\chi^{-1}\left( \mathbf{h} \right)$
    ◆ pull_back() [5/5]

    Return the result of the pull back transformation on a rank-4 contravariant symmetric tensor, i.e. (in index notation):

    \[
      \left[ \chi^{-1}\left(\bullet\right)^{\sharp} \right]_{IJKL}
        \dealcoloneq F^{-1}_{Ii} F^{-1}_{Jj}
        \left(\bullet\right)^{\sharp}_{ijkl} F^{-1}_{Kk} F^{-1}_{Ll}
    \]

    Parameters
        [in] h  The (spatial) symmetric tensor to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

    Returns
        $\chi^{-1}\left( \mathbf{h} \right)$
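    A sketch of a round trip through these transformations: pushing a referential rank-2 tensor forward and pulling it back again should recover the original tensor up to roundoff. The deformation gradient F below is an arbitrary invertible example, not taken from the documentation:

    #include <deal.II/base/exceptions.h>
    #include <deal.II/base/tensor.h>
    #include <deal.II/physics/transformations.h>

    using namespace dealii;

    int main()
    {
      constexpr int dim = 3;
      namespace Contravariant = Physics::Transformations::Contravariant;

      // An invertible deformation gradient: identity plus simple shear.
      Tensor<2, dim> F;
      for (unsigned int i = 0; i < dim; ++i)
        F[i][i] = 1.0;
      F[0][1] = 0.5;

      // A referential contravariant tensor, here diag(1, 2, 3).
      Tensor<2, dim> T;
      T[0][0] = 1.0;
      T[1][1] = 2.0;
      T[2][2] = 3.0;

      // chi(T)^sharp = F . T . F^T
      const Tensor<2, dim> t = Contravariant::push_forward(T, F);

      // chi^{-1}(t)^sharp = F^{-1} . t . F^{-T} recovers T up to roundoff.
      const Tensor<2, dim> T_back = Contravariant::pull_back(t, F);

      AssertThrow((T - T_back).norm() < 1e-12, ExcInternalError());
    }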
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Covariant.html differs (HTML document, ASCII text, with very long lines)

    Detailed Description

    Transformation of tensors that are defined in terms of a set of covariant basis vectors. Rank-1 and rank-2 covariant tensors $\left(\bullet\right)^{\flat} = \mathbf{T}$ (and its spatial counterpart $\mathbf{t}$) typically satisfy the relation

    \[
      \int_{\partial V_{0}} \left[ \nabla_{0} \times \mathbf{T} \right]
        \cdot \mathbf{N} \; dA
      = \oint_{\partial A_{0}} \mathbf{T} \cdot \mathbf{L} \; dL
      = \oint_{\partial A_{t}} \mathbf{t} \cdot \mathbf{l} \; dl
      = \int_{\partial V_{t}} \left[ \nabla \times \mathbf{t} \right]
        \cdot \mathbf{n} \; da
    \]

    where the control surfaces $\partial V_{0}$ and $\partial V_{t}$ with outwards facing normals $\mathbf{N}$ and $\mathbf{n}$ are bounded by the curves $\partial A_{0}$ and $\partial A_{t}$ that are, respectively, associated with the line directors $\mathbf{L}$ and $\mathbf{l}$.

    Function Documentation

    ◆ push_forward() [1/5]


    Return the result of the push forward transformation on a covariant vector, i.e.

    \[
      \chi\left(\bullet\right)^{\flat}
        \dealcoloneq \mathbf{F}^{-T} \cdot \left(\bullet\right)^{\flat}
    \]

    Parameters
        [in] V  The (referential) vector to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$
    ◆ push_forward() [2/5]

    Return the result of the push forward transformation on a rank-2 covariant tensor, i.e.

    \[
      \chi\left(\bullet\right)^{\flat}
        \dealcoloneq \mathbf{F}^{-T} \cdot \left(\bullet\right)^{\flat}
        \cdot \mathbf{F}^{-1}
    \]

    Parameters
        [in] T  The (referential) rank-2 tensor to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$
    ◆ push_forward() [3/5]

    Return the result of the push forward transformation on a rank-2 covariant symmetric tensor, i.e.

    \[
      \chi\left(\bullet\right)^{\flat}
        \dealcoloneq \mathbf{F}^{-T} \cdot \left(\bullet\right)^{\flat}
        \cdot \mathbf{F}^{-1}
    \]

    Parameters
        [in] T  The (referential) rank-2 symmetric tensor to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$
    ◆ push_forward() [4/5]

    Return the result of the push forward transformation on a rank-4 covariant tensor, i.e. (in index notation):

    \[
      \left[ \chi\left(\bullet\right)^{\flat} \right]_{ijkl}
        \dealcoloneq F^{-T}_{iI} F^{-T}_{jJ}
        \left(\bullet\right)^{\flat}_{IJKL} F^{-T}_{kK} F^{-T}_{lL}
    \]

    Parameters
        [in] H  The (referential) rank-4 tensor to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$
    ◆ push_forward() [5/5]

    Return the result of the push forward transformation on a rank-4 covariant symmetric tensor, i.e. (in index notation):

    \[
      \left[ \chi\left(\bullet\right)^{\flat} \right]_{ijkl}
        \dealcoloneq F^{-T}_{iI} F^{-T}_{jJ}
        \left(\bullet\right)^{\flat}_{IJKL} F^{-T}_{kK} F^{-T}_{lL}
    \]

    Parameters
        [in] H  The (referential) rank-4 symmetric tensor to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$
    ◆ pull_back() [1/5]

    Return the result of the pull back transformation on a covariant vector, i.e.

    \[
      \chi^{-1}\left(\bullet\right)^{\flat}
        \dealcoloneq \mathbf{F}^{T} \cdot \left(\bullet\right)^{\flat}
    \]

    Parameters
        [in] v  The (spatial) vector to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$
    ◆ pull_back() [2/5]

    Return the result of the pull back transformation on a rank-2 covariant tensor, i.e.

    \[
      \chi^{-1}\left(\bullet\right)^{\flat}
        \dealcoloneq \mathbf{F}^{T} \cdot \left(\bullet\right)^{\flat} \cdot
        \mathbf{F}
    \]

    Parameters
        [in] t  The (spatial) tensor to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

    Returns
        $\chi^{-1}\left( \mathbf{t} \right)$
    ◆ pull_back() [3/5]

    Return the result of the pull back transformation on a rank-2 covariant symmetric tensor, i.e.

    \[
      \chi^{-1}\left(\bullet\right)^{\flat}
        \dealcoloneq \mathbf{F}^{T} \cdot \left(\bullet\right)^{\flat}
        \cdot \mathbf{F}
    \]

    Parameters
        [in] t  The (spatial) symmetric tensor to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$
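    Analogously, a short sketch of the covariant push forward and pull back of a vector, under the same illustrative assumptions as the contravariant sketch above:

    #include <deal.II/base/exceptions.h>
    #include <deal.II/base/tensor.h>
    #include <deal.II/physics/transformations.h>

    using namespace dealii;

    int main()
    {
      constexpr int dim = 3;
      namespace Covariant = Physics::Transformations::Covariant;

      // Identity plus simple shear keeps F invertible.
      Tensor<2, dim> F;
      for (unsigned int i = 0; i < dim; ++i)
        F[i][i] = 1.0;
      F[1][2] = 0.25;

      Tensor<1, dim> V;
      V[0] = 1.0;
      V[1] = -2.0;

      // chi(V)^flat = F^{-T} . V, then chi^{-1}(v)^flat = F^T . v returns V.
      const Tensor<1, dim> v      = Covariant::push_forward(V, F);
      const Tensor<1, dim> V_back = Covariant::pull_back(v, F);

      AssertThrow((V - V_back).norm() < 1e-12, ExcInternalError());
    }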
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1Transformations_1_1Piola.html differs (HTML document, ASCII text, with very long lines)

    ◆ push_forward() [1/5]

    Return the result of the push forward transformation on a contravariant vector, i.e.

    \[
      \textrm{det} \mathbf{F}^{-1} \; \chi\left(\bullet\right)^{\sharp}
      \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; \mathbf{F} \cdot
      \left(\bullet\right)^{\sharp}
    \]

    Parameters
        [in] V  The (referential) vector to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

    Returns
        $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left( \mathbf{V} \right)$
    ◆ push_forward() [2/5]

    Return the result of the push forward transformation on a rank-2 contravariant tensor, i.e.

    \[
      \textrm{det} \mathbf{F}^{-1} \; \chi\left(\bullet\right)^{\sharp}
        \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; \mathbf{F} \cdot
        \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{T}
    \]

    Parameters
        [in] T  The (referential) rank-2 tensor to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

    Returns
        $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left( \mathbf{T} \right)$
    ◆ push_forward() [3/5]

    Return the result of the push forward transformation on a rank-2 contravariant symmetric tensor, i.e.

    \[
      \textrm{det} \mathbf{F}^{-1} \; \chi\left(\bullet\right)^{\sharp}
        \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; \mathbf{F} \cdot
        \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{T}
    \]

    Parameters
        [in] T  The (referential) rank-2 symmetric tensor to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

    Returns
        $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left( \mathbf{T} \right)$
    ◆ push_forward() [4/5]

    Return the result of the push forward transformation on a rank-4 contravariant tensor, i.e. (in index notation):

    \[
      \textrm{det} \mathbf{F}^{-1} \; \left[
        \chi\left(\bullet\right)^{\sharp} \right]_{ijkl}
        \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; F_{iI} F_{jJ}
        \left(\bullet\right)^{\sharp}_{IJKL} F_{kK} F_{lL}
    \]

    Parameters
        [in] H  The (referential) rank-4 tensor to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

    Returns
        $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left( \mathbf{H} \right)$
    ◆ push_forward() [5/5]

    Return the result of the push forward transformation on a rank-4 contravariant symmetric tensor, i.e. (in index notation):

    \[
      \textrm{det} \mathbf{F}^{-1} \; \left[
        \chi\left(\bullet\right)^{\sharp} \right]_{ijkl}
        \dealcoloneq \frac{1}{\textrm{det} \mathbf{F}} \; F_{iI} F_{jJ}
        \left(\bullet\right)^{\sharp}_{IJKL} F_{kK} F_{lL}
    \]

    Parameters
        [in] H  The (referential) rank-4 symmetric tensor to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

    Returns
        $\frac{1}{\textrm{det} \mathbf{F}} \; \chi\left( \mathbf{H} \right)$
    ◆ pull_back() [1/5]

    Return the result of the pull back transformation on a contravariant vector, i.e.

    \[
      \textrm{det} \mathbf{F} \; \chi^{-1}\left(\bullet\right)^{\sharp}
        \dealcoloneq \textrm{det} \mathbf{F} \; \mathbf{F}^{-1} \cdot
        \left(\bullet\right)^{\sharp}
    \]

    Parameters
        [in] v  The (spatial) vector to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$

    Returns
        $\textrm{det} \mathbf{F} \; \chi^{-1}\left( \mathbf{v} \right)$
    ◆ pull_back() [2/5]

    Return the result of the pull back transformation on a rank-2 contravariant tensor, i.e.

    \[
      \textrm{det} \mathbf{F} \; \chi^{-1}\left(\bullet\right)^{\sharp}
        \dealcoloneq \textrm{det} \mathbf{F} \; \mathbf{F}^{-1} \cdot
        \left(\bullet\right)^{\sharp} \cdot \mathbf{F}^{-T}
    \]

    Parameters
        [in] t  The (spatial) tensor to be operated on
        [in] F  The deformation gradient tensor $\mathbf{F} \left( \mathbf{X} \right)$
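    A sketch of the Piola transformation round trip for a rank-2 tensor: because push_forward() carries the $1/\textrm{det}\mathbf{F}$ weighting and pull_back() the $\textrm{det}\mathbf{F}$ weighting, composing the two returns the original tensor. The values of F and T are made up for the example:

    #include <deal.II/base/exceptions.h>
    #include <deal.II/base/tensor.h>
    #include <deal.II/physics/transformations.h>

    using namespace dealii;

    int main()
    {
      constexpr int dim = 3;
      namespace Piola = Physics::Transformations::Piola;

      // A deformation gradient with det F = 1.08 (upper triangular).
      Tensor<2, dim> F;
      F[0][0] = 1.2;
      F[1][1] = 0.9;
      F[2][2] = 1.0;
      F[0][2] = 0.3;

      Tensor<2, dim> T;
      T[0][0] = 1.0;
      T[1][2] = 2.0;

      // (1/det F) F . T . F^T, then det F F^{-1} . t . F^{-T}.
      const Tensor<2, dim> t      = Piola::push_forward(T, F);
      const Tensor<2, dim> T_back = Piola::pull_back(t, F);

      AssertThrow((T - T_back).norm() < 1e-12, ExcInternalError());
    }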
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespacePhysics_1_1VectorRelations.html differs (HTML document, ASCII text, with very long lines)

    ◆ angle()

    Calculate the angle $\theta$ between two vectors a and b. The returned angle will be in the range $[0, \pi]$.

    This function uses the geometric definition of the scalar product.

    \[
       \vec{a} \cdot \vec{b} = \|\vec{a}\| \|\vec{b}\| \cos(\theta)
    \]

    ◆ signed_angle()

    Calculate the angle $\theta$ between two vectors a and b, where both vectors are located in a plane described by a normal vector axis.

    The angle computed by this function corresponds to the rotation angle that would transform the vector a into the vector b around the vector axis. Thus, contrary to the function above, we get a signed angle which will be in the range $[-\pi, \pi]$.

    The vector axis needs to be a unit vector and be perpendicular to both vectors a and b.

    This function uses the geometric definitions of both the scalar and cross product.

    \begin{align*}
       \vec{a} \cdot  \vec{b} &= \|\vec{a}\| \|\vec{b}\| \cos(\theta) \\
       \vec{a} \times \vec{b} &= \|\vec{a}\| \|\vec{b}\| \sin(\theta) \vec{n}
    \end{align*}

    We can compute the tangent of the angle using both products.

    \[
       \tan{\theta}
       = \frac{\sin(\theta)}{\cos(\theta)}
       = \frac{(\vec{a} \times \vec{b}) \cdot \vec{n}}{\vec{a} \cdot \vec{b}}
    \]

    Note
    Only applicable for three-dimensional vectors spacedim == 3.
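    For illustration, a small sketch of both functions; the header path deal.II/physics/vector_relations.h is an assumption, and the vectors are chosen so the angles are easy to verify by hand:

    #include <deal.II/base/tensor.h>
    #include <deal.II/physics/vector_relations.h>

    #include <iostream>

    using namespace dealii;

    int main()
    {
      constexpr int spacedim = 3;

      Tensor<1, spacedim> a, b, axis;
      a[0]    = 1.0; // a = (1, 0, 0)
      b[1]    = 1.0; // b = (0, 1, 0)
      axis[2] = 1.0; // unit axis, perpendicular to both a and b

      // Unsigned angle in [0, pi]: here pi/2.
      const double theta = Physics::VectorRelations::angle(a, b);

      // Signed angle in [-pi, pi]: rotating a into b about +z gives +pi/2;
      // swapping the arguments would give -pi/2.
      const double phi = Physics::VectorRelations::signed_angle(a, b, axis);

      std::cout << theta << ' ' << phi << std::endl;
    }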
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSLEPcWrappers.html differs (HTML document, ASCII text, with very long lines)

    Detailed Description

    Base namespace for solver classes using the SLEPc solvers which are selected based on flags passed to the eigenvalue problem solver context. Derived classes set the right flags to set the right solver.

    The SLEPc solvers are intended to be used for solving the generalized eigenspectrum problem $(A-\lambda B)x=0$, for $x\neq0$; where $A$ is a system matrix, $B$ is a mass matrix, and $\lambda, x$ are a set of eigenvalues and eigenvectors respectively. The emphasis is on methods and techniques appropriate for problems in which the associated matrices are sparse. Most of the methods offered by the SLEPc library are projection methods or other methods with similar properties; and wrappers are provided to interface to SLEPc solvers that handle both of these problem sets.

    SLEPcWrappers can be used in application codes in the following way:

    SolverControl solver_control (1000, 1e-9);
    SolverArnoldi system (solver_control, mpi_communicator);
    system.solve (A, B, lambda, x, size_of_spectrum);
    for the generalized eigenvalue problem $Ax=B\lambda x$, where the variable const unsigned int size_of_spectrum tells SLEPc the number of eigenvector/eigenvalue pairs to solve for. Additional options and solver parameters can be passed to the SLEPc solvers before calling solve(). For example, if the matrices of the general eigenspectrum problem are not Hermitian and only the lower eigenvalues are wanted, the following code can be implemented before calling solve():

    system.set_problem_type (EPS_NHEP);
    system.set_which_eigenpairs (EPS_SMALLEST_REAL);

    These options can also be set at the command line.

    See also step-36 for a hands-on example.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSUNDIALS.html differs (HTML document, ASCII text, with very long lines)

    Type of function objects to interface with SUNDIALS' linear solvers

    This function type encapsulates the action of solving $P^{-1}Ax=P^{-1}b$. The LinearOperator op encapsulates the matrix vector product $Ax$ and the LinearOperator prec encapsulates the application of the preconditioner $P^{-1}z$. The user can specify function objects of this type to attach custom linear solver routines to SUNDIALS. The two LinearOperators op and prec are built internally by SUNDIALS based on user settings. The parameters are interpreted as follows:

    Parameters
        [in] op  A LinearOperator that applies the matrix vector product
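    As an illustration of attaching a custom linear solver, a hedged sketch using deal.II's conjugate gradient solver. The SundialsOperator/SundialsPreconditioner argument types and the exact callback signature are assumptions based on the parameter list above (VectorType stands for, e.g., Vector<double>); consult the wrapper class actually in use:

    // Sketch: solve P^{-1} A x = P^{-1} b up to the given tolerance with CG.
    // Both 'op' (applies A) and 'prec' (applies P^{-1}) provide vmult(),
    // which is all that SolverCG requires of its matrix and preconditioner.
    const auto linear_solve =
      [](SUNDIALS::SundialsOperator<VectorType>       &op,
         SUNDIALS::SundialsPreconditioner<VectorType> &prec,
         VectorType                                   &x,
         const VectorType                             &b,
         double                                        tol) {
        SolverControl        control(1000, tol);
        SolverCG<VectorType> solver(control);
        solver.solve(op, x, b, prec);
      };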
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Fourier.html differs (HTML document, ASCII text, with very long lines)

    Detailed Description

    Smoothness estimation strategy based on the decay of Fourier expansion coefficients.

    From the definition, we can write our Fourier series expansion $a_{\bf k}$ of the finite element solution on cell $K$ with polynomial degree $p$ as a matrix product

    \begin{eqnarray*}
       u_h({\bf x}) &=& \sum_j u_j \varphi_j ({\bf x}) \\
       u_{h, {\bf k}}({\bf x}) &=&
         \sum_{{\bf k}, \|{\bf k}\|\le p} a_{\bf k} \phi_{\bf k}({\bf x}),
         \quad a_{\bf k} = \sum_j {\cal F}_{{\bf k},j} u_j
    \end{eqnarray*}

    with $u_j$ the degrees of freedom and $\varphi_j$ the corresponding shape functions. $\{\phi_{\bf k}({\bf x}) = \exp(i \, 2 \pi \, {\bf k} \cdot {\bf x}) \}$ are exponential functions on cell $K$. $a_{\bf k}$ and ${\cal F}_{{\bf k},j}$ are coefficients and transformation matrices from the Fourier expansion of each shape function. For practical reasons, we will perform the calculation of these matrices and coefficients only on the reference cell $\hat K$. We only have to calculate the transformation matrices once this way. However, results are only applicable if mapping from the reference cell to the actual cell is linear. We use the class FESeries::Fourier to determine all coefficients $a_{\bf k}$.

    If the finite element approximation on cell $K$ is part of the Hilbert space $H^s(K)$, then the following integral must exist for both the finite element and spectral representation of our solution

    \begin{eqnarray*}
       \| \nabla^s u_h({\bf x}) \|_{L^2(K)}^2 &=&
         \int\limits_K \left| \nabla^s u_h({\bf x}) \right|^2 d{\bf x} <
         \infty \\
       \| \nabla^s u_{h, {\bf k}}({\bf x}) \|_{L^2(K)}^2 &=&
         \int\limits_K \left| \sum\limits_{\bf k} (-i \, 2 \pi \, {\bf k})^s
         a_{\bf k} \, \phi_{\bf k}({\bf x}) \right|^2 d{\bf x} =
         (2 \pi)^{2s} \sum\limits_{\bf k} \left| a_{\bf k} \right|^2
         \|{\bf k}\|_2^{2s} < \infty
    \end{eqnarray*}

    The sum is finite only if the summands decay at least with order

    \[
       |a_{\bf k}|^2 \|{\bf k}\|_2^{2s} \|{\bf k}\|_2^{d - 1} =
         {\cal O}\left( \|{\bf k}\|_2^{-1-\epsilon} \right)
    \]

    for all $\epsilon > 0$. The additional factor stems from the fact that, since we sum over all multi-indices ${\bf k}$ that are located on a dim-dimensional sphere, we actually have, up to a constant, $\|{\bf k}\|_2^{d-1}$ modes located in each increment $\|{\bf k}\|_2 + d\|{\bf k}\|_2$ that need to be taken into account. By a comparison of exponents, we can rewrite this condition as

    \[
       |a_{\bf k}| = {\cal O}\left(\|{\bf k}\|_2^
         {-\left(s + \frac d2 + \epsilon \right)} \right)
    \]

    The next step is to estimate how fast these coefficients decay with $\|{\bf k}\|_2$. Thus, we perform a least-squares fit

    \[
        \min_{\alpha,\sigma}
        \frac 12 \sum_{{\bf k}, \|{\bf k}\|_2 \le p}
        \left( |a_{\bf k}| - \alpha \|{\bf k}\|_2^{-\sigma}\right)^2
    \]

    with regression coefficients $\alpha$ and $\sigma$. For simplification, we apply a logarithm on our minimization problem

    \[
        \min_{\beta,\sigma}
        Q(\beta,\sigma) =
        \frac 12 \sum_{{\bf k}, \|{\bf k}\|_2 \le p}
        \left( \ln |a_{\bf k}| - \beta + \sigma \ln \|{\bf k}\|_2 \right)^2,
    \]

    where $\beta=\ln \alpha$. This is now a problem for which the optimality conditions $\frac{\partial Q}{\partial\beta}=0, \frac{\partial Q}{\partial\sigma}=0$, are linear in $\beta,\sigma$. We can write these conditions as follows:

    \[
        \left(\begin{array}{cc}
        \sum_{{\bf k}, \|{\bf k}\|_2 \le p} 1 &
        \sum_{{\bf k}, \|{\bf k}\|_2 \le p} \ln \|{\bf k}\|_2 \\
        \sum_{{\bf k}, \|{\bf k}\|_2 \le p} \ln \|{\bf k}\|_2 &
        \sum_{{\bf k}, \|{\bf k}\|_2 \le p} (\ln \|{\bf k}\|_2)^2
        \end{array}\right)
        \left(\begin{array}{c} \beta \\ -\sigma \end{array}\right)
        =
        \left(\begin{array}{c}
        \sum_{{\bf k}, \|{\bf k}\|_2 \le p} \ln |a_{{\bf k}}| \\
        \sum_{{\bf k}, \|{\bf k}\|_2 \le p} \ln |a_{{\bf k}}| \ln \|{\bf k}\|_2
        \end{array}\right)
    \]

    Solving for $\beta$ and $\sigma$ is just a linear regression fit and to do that we will use FESeries::linear_regression().

    While we are not particularly interested in the actual value of $\beta$, the formula above gives us a means to calculate the value of the exponent $\sigma$ that we can then use to determine that $u(\hat{\bf x})$ is in $H^s(K)$ with $s=\sigma-\frac d2$. The decay rates $\sigma$ will suffice as our smoothness indicators and will be calculated on each cell for any provided solution.

    Note
    An extensive demonstration of the use of these functions is provided in step-27.

    Function Documentation

    ◆ coefficient_decay()

    In this variant of the estimation strategy for higher dimensions, we will consider all mode vectors $\bf k$ describing Fourier polynomials $P_{\bf k}$ and perform one least-squares fit over all coefficients at once. If there are multiple coefficients corresponding to the same absolute value of modes $\|\bf k\|_2$, we take the maximum among those. Thus, the least-squares fit is performed on

    \[
       \ln \left( \max\limits_{\|{\bf k}\|_2} |a_{\bf k}| \right) \sim
         C - \sigma \ln \|{\bf k}\|_2
    \]

    for ${\bf k}=(k_1,\ldots,k_d)$ and $k_i=0,\ldots,p$, with $p$ the polynomial degree of the finite element. We exclude the $\|{\bf k}\|_2=0$ modes to avoid the singularity of the logarithm.

    The regression_strategy parameter determines which norm will be used on the subset of coefficients $\bf k$ with the same absolute value $\|{\bf k}\|_2$. Default is VectorTools::Linfty_norm for a maximum approximation.

    For a provided solution vector solution defined on a DoFHandler dof_handler, this function returns a vector smoothness_indicators with as many elements as there are cells where each element contains the estimated regularity $\sigma$.

    A series expansion object fe_fourier has to be supplied, which needs to be constructed with the same FECollection object as the dof_handler.

    The parameter smallest_abs_coefficient allows one to ignore small (in absolute value) coefficients within the linear regression fit. In case there are fewer than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.

    Smoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. The parameter only_flagged_cells controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to a signaling NaN.

    Definition at line 368 of file smoothness_estimator.cc.
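    A sketch of the typical call sequence, following the pattern of step-27; dim, triangulation, dof_handler, and solution are assumed to exist in the surrounding code:

    #include <deal.II/fe/fe_q.h>
    #include <deal.II/hp/fe_collection.h>
    #include <deal.II/lac/vector.h>
    #include <deal.II/numerics/smoothness_estimator.h>

    // Lagrange elements of increasing degree for hp-adaptivity.
    hp::FECollection<dim> fe_collection;
    for (unsigned int degree = 2; degree <= 5; ++degree)
      fe_collection.push_back(FE_Q<dim>(degree));

    // Series expansion object with the default smoothness-estimation setup.
    FESeries::Fourier<dim> fourier =
      SmoothnessEstimator::Fourier::default_fe_series(fe_collection);

    // One estimated decay rate sigma per active cell.
    Vector<float> smoothness_indicators(triangulation.n_active_cells());
    SmoothnessEstimator::Fourier::coefficient_decay(fourier,
                                                    dof_handler,
                                                    solution,
                                                    smoothness_indicators);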

    ◆ coefficient_decay_per_direction()

    In this variant of the estimation strategy for higher dimensions, we only consider modes in each coordinate direction, i.e., only mode vectors $\bf k$ with one nonzero entry. We perform the least-squares fit in each coordinate direction separately and take the lowest decay rate $\sigma$ among them.

    The coefficients_predicate parameter selects Fourier coefficients $a_j$, $j=0,\ldots,p$ for linear regression in each coordinate direction. The user is responsible for updating the vector of flags provided to this function. Note that its size is $p+1$, where $p$ is the polynomial degree of the FE basis on a given element. The default implementation will use all Fourier coefficients in each coordinate direction, i.e., set all the elements of the vector to true.

    For a provided solution vector solution defined on a DoFHandler dof_handler, this function returns a vector smoothness_indicators with as many elements as there are cells where each element contains the estimated regularity $\sigma$.

    A series expansion object fe_fourier has to be supplied, which needs to be constructed with the same FECollection object as the dof_handler.

    The parameter smallest_abs_coefficient allows one to ignore small (in absolute value) coefficients within the linear regression fit. In case there are fewer than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.

    Smoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. The parameter only_flagged_cells controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to a signaling NaN.

    Definition at line 466 of file smoothness_estimator.cc.

    ◆ default_fe_series()

    Returns a FESeries::Fourier object for Fourier series expansions with the default configuration for smoothness estimation purposes.

    For each finite element of the provided fe_collection, we use as many modes as its polynomial degree plus two. Further, for each element we use a 5-point Gaussian quadrature iterated in each dimension by the maximal wave number, which is the number of modes decreased by one since we start with $k = 0$.

    As the Fourier expansion can only be performed on scalar fields, this class does not operate on vector-valued finite elements and will therefore throw an assertion. However, each component of a finite element field can be treated as a scalar field, respectively, on which Fourier expansions are again possible. For this purpose, the optional parameter component defines which component of each FiniteElement will be used. The default value of component only applies to scalar FEs, in which case it indicates that the sole component is to be decomposed. For vector-valued FEs, a non-default value must be explicitly provided.

    Definition at line 575 of file smoothness_estimator.cc.

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSmoothnessEstimator_1_1Legendre.html differs (HTML document, ASCII text, with very long lines)

    Detailed Description

    Smoothness estimation strategy based on the decay of Legendre expansion coefficients.

    In one dimension, the finite element solution on cell $K$ with polynomial degree $p$ can be written as

    \begin{eqnarray*}
        u_h(x) &=& \sum_j u_j \varphi_j (x) \\
        u_{h, k}(x) &=& \sum_{k=0}^{p} a_k \widetilde P_k (x),
        \quad a_k = \sum_j {\cal L}_{k,j} u_j
    \end{eqnarray*}

    where $u_j$ are degrees of freedom and $\varphi_j$ are the corresponding shape functions. $\{\widetilde P_k(x)\}$ are Legendre polynomials on cell $K$. $a_k$ and ${\cal L}_{k,j}$ are coefficients and transformation matrices from the Legendre expansion of each shape function. For practical reasons, we will perform the calculation of these matrices and coefficients only on the reference cell $\hat K$. We only have to calculate the transformation matrices once this way. However, results are only applicable if the mapping from the reference cell to the actual cell is affine. We use the class FESeries::Legendre to determine all coefficients $a_k$.

    A function is analytic, i.e., representable by a power series, if and only if its Legendre expansion coefficients decay as (see [eibner2007hp])

    \[
       |a_k| \sim c \, \exp(-\sigma k)
    \]

    We determine their decay rate $\sigma$ by performing the linear regression fit of

    \[
       \ln |a_k| \sim C - \sigma k
    \]

    for $k=0,\ldots,p$, with $p$ the polynomial degree of the finite element. The rate of the decay $\sigma$ can be used to estimate the smoothness. For example, one strategy to implement hp-refinement criteria is to perform p-refinement if $\sigma>1$ (see [mavriplis1994hp]).

    Function Documentation

    ◆ coefficient_decay()


    In this variant of the estimation strategy for higher dimensions, we will consider all mode vectors $\bf k$ describing Legendre polynomials $\widetilde P_{\bf k}$ and perform one least-squares fit over all coefficients at once. If there are multiple coefficients corresponding to the same absolute value of modes $\|{\bf k}\|_1$, we take the maximum among those. Thus, the least-squares fit is performed on

    \begin{eqnarray*}
       \widetilde P_{\bf k}({\bf x}) &=&
         \widetilde P_{k_1} (x_1) \ldots \widetilde P_{k_d} (x_d) \\
       \ln \left( \max\limits_{\|{\bf k}\|_1} |a_{\bf k}| \right) &\sim&
         C - \sigma \|{\bf k}\|_1
    \end{eqnarray*}

    for ${\bf k}=(k_1,\ldots,k_d)$ and $k_i=0,\ldots,p$, with $p$ the polynomial degree of the finite element.

    For a finite element approximation solution, this function writes the decay rate for every cell into the output vector smoothness_indicators.

    Parameters
        [in]  fe_legendre  FESeries::Legendre object to calculate coefficients. This object needs to be initialized to have at least $p+1$ coefficients in each direction for every finite element in the collection, where $p$ is its polynomial degree.
        [in]  dof_handler  A DoFHandler.
        [in]  solution  A solution vector.
        [out] smoothness_indicators  A vector for smoothness indicators.
        [in]  regression_strategy  Determines which norm will be used on the subset of coefficients $\mathbf{k}$ with the same absolute value $\|{\bf k}\|_1$. Default is VectorTools::Linfty_norm for a maximum approximation.
        [in]  smallest_abs_coefficient  The smallest absolute value of the coefficient to be used in linear regression. Note that Legendre coefficients of some functions may have a repeating pattern of zero coefficients (i.e. for functions that are locally symmetric or antisymmetric about the midpoint of the element in any coordinate direction). Thus this parameter allows one to ignore small (in absolute value) coefficients within the linear regression fit. In case there are fewer than two nonzero coefficients, the returned value for this cell will be $\sigma=\infty$.
        [in]  only_flagged_cells  Smoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. This parameter controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to a signaling NaN.
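    The analogous call sequence for the Legendre-based estimator, again following step-27; fe_collection, triangulation, dof_handler, and solution are assumed to exist in the surrounding code:

    #include <deal.II/lac/vector.h>
    #include <deal.II/numerics/smoothness_estimator.h>

    // Series expansion object with the default smoothness-estimation setup.
    FESeries::Legendre<dim> legendre =
      SmoothnessEstimator::Legendre::default_fe_series(fe_collection);

    // One estimated decay rate sigma per active cell.
    Vector<float> smoothness_indicators(triangulation.n_active_cells());
    SmoothnessEstimator::Legendre::coefficient_decay(legendre,
                                                     dof_handler,
                                                     solution,
                                                     smoothness_indicators);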
    ◆ coefficient_decay_per_direction()

    In this variant of the estimation strategy for higher dimensions, we only consider modes in each coordinate direction, i.e., only mode vectors $\bf k$ with one nonzero entry. We perform the least-squares fit in each coordinate direction separately and take the lowest decay rate $\sigma$ among them.

    For a finite element approximation described by the solution vector, this function writes the decay rate for every cell into the output vector smoothness_indicators.

    Parameters
    [in] fe_legendre FESeries::Legendre object to calculate coefficients. This object needs to be initialized to have at least $p+1$ coefficients in each direction, where $p$ is the maximum polynomial degree to be used.
    [in] dof_handler A DoFHandler
    [in] solution A solution vector
    [out] smoothness_indicators A vector for smoothness indicators
    [in] coefficients_predicate A predicate to select Legendre coefficients $a_j$, $j=0,\ldots,p$ for linear regression in each coordinate direction. The user is responsible for updating the vector of flags provided to this function. Note that its size is $p+1$, where $p$ is the polynomial degree of the FE basis on a given element. The default implementation will use all Legendre coefficients in each coordinate direction, i.e. set all elements of the vector to true.
    [in] smallest_abs_coefficient The smallest absolute value of the coefficient to be used in linear regression in each coordinate direction. Note that Legendre coefficients of some functions may have a repeating pattern of zero coefficients (i.e. for functions that are locally symmetric or antisymmetric about the midpoint of the element in any coordinate direction). Thus, this parameter allows ignoring small (in absolute value) coefficients within the linear regression fit. In case there are fewer than two nonzero coefficients for a coordinate direction, this direction will be skipped. If all coefficients are zero, the returned value for this cell will be $\sigma=\infty$.
    [in] only_flagged_cells Smoothness indicators are usually used to decide whether to perform h- or p-adaptation. So in most cases, we only need to calculate those indicators on cells flagged for refinement or coarsening. This parameter controls whether this particular subset or all cells will be considered. By default, all cells will be considered. When only flagged cells are supposed to be considered, smoothness indicators will only be set on those vector entries of flagged cells; the others will be set to NaN.
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html 2024-11-15 06:44:27.487654230 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceSparseMatrixTools.html 2024-11-15 06:44:27.487654230 +0000 @@ -152,18 +152,18 @@ SparsityPatternType2 & sparsity_pattern_out

    Given a sparse matrix (system_matrix, sparsity_pattern), construct a new sparse matrix (system_matrix_out, sparsity_pattern_out) by restriction

\[
  A_i = R_i A R_i^T,
\]

    where the Boolean matrix $R_i$ is defined by the entries of requested_is.

    The function can be called by multiple processes with different sets of indices, allowing each process to be assigned a different $A_i$.

    Such a function is useful to implement Schwarz methods, where operations of type

\[
  u^{n} = u^{n-1} + \sum_{i} R_i^T A_i^{-1} R_i (f - A u^{n-1})
\]

    are performed to iteratively solve a system of type $Au=f$.

    Warning
    This is a collective call that needs to be executed by all processes in the communicator of sparse_matrix_in.
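
    A usage-level sketch (hedged: the function name restrict_to_serial_sparse_matrix is an assumption, since the diff only shows the trailing parameters; system_matrix, sparsity_pattern, and the IndexSet requested_is are assumed to exist):

    // Hedged sketch: every process collectively extracts its block A_i,
    // defined by the rows/columns in 'requested_is'.
    SparsityPattern      sparsity_pattern_out;
    SparseMatrix<double> system_matrix_out;
    SparseMatrixTools::restrict_to_serial_sparse_matrix(system_matrix,
                                                        sparsity_pattern,
                                                        requested_is,
                                                        system_matrix_out,
                                                        sparsity_pattern_out);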
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html 2024-11-15 06:44:27.507654409 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceTensorAccessors.html 2024-11-15 06:44:27.507654409 +0000 @@ -191,7 +191,7 @@
    Note
    This function returns an internal class object consisting of an array subscript operator operator[](unsigned int) and an alias value_type describing its return value.
    Template Parameters
    index The index to be shifted to the end. Indices are counted from 0, thus the valid range is $0\le\text{index}<\text{rank}$.
    rank Rank of the tensorial object t
    T A tensorial object of rank rank. T must provide a local alias value_type and an index operator operator[]() that returns a (const or non-const) reference of value_type.
    @@ -275,12 +275,12 @@

    This function contracts two tensorial objects left and right and stores the result in result. The contraction is done over the last no_contr indices of both tensorial objects:

\[
   \text{result}_{i_1,..,i_{r1},j_1,..,j_{r2}}
   = \sum_{k_1,..,k_{\mathrm{no\_contr}}}
     \mathrm{left}_{i_1,..,i_{r1},k_1,..,k_{\mathrm{no\_contr}}}
     \mathrm{right}_{j_1,..,j_{r2},k_1,..,k_{\mathrm{no\_contr}}}
\]

    Calling this function is equivalent to writing the following low-level code:

    for(unsigned int i_0 = 0; i_0 < dim; ++i_0)
    ...
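
    A concrete usage sketch (hedged: the call form contract<no_contr, rank_1, rank_2, dim>(result, left, right) is inferred from the description above; Tensor is the usual deal.II tensor class):

    // Hedged sketch: contract the last index of two rank-2 tensors in 3d,
    // i.e. result[i][j] = sum_k left[i][k] * right[j][k].
    Tensor<2, 3> left, right, result;
    TensorAccessors::contract<1, 2, 2, 3>(result, left, right);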
    @@ -335,12 +335,12 @@

    Full contraction of three tensorial objects:

\[
   \sum_{i_1,..,i_{r1},j_1,..,j_{r2}}
   \text{left}_{i_1,..,i_{r1}}
   \text{middle}_{i_1,..,i_{r1},j_1,..,j_{r2}}
   \text{right}_{j_1,..,j_{r2}}
\]

    Calling this function is equivalent to writing the following low-level code:

    T1 result = T1();
    for(unsigned int i_0 = 0; i_0 < dim; ++i_0)
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities.html 2024-11-15 06:44:27.547654766 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities.html 2024-11-15 06:44:27.547654766 +0000 @@ -1034,13 +1034,13 @@

    Calculate a fixed power, provided as a template argument, of a number.

    This function provides an efficient way to calculate things like $t^N$ where N is a known number at compile time. The function computes the power of $t$ via the "recursive doubling" approach in which, for example, $t^7$ is computed as

    \begin{align*}
   t^7 = (tttt)(tt)(t)
 \end{align*}

    where computing $tt$ requires one product, computing $tttt$ is achieved by multiplying the previously computed $tt$ by itself (requiring another multiplication), and then the product is computed via two more multiplications for a total of 4 multiplications instead of the naively necessary 6.

    The major savings this function generates come, however, from the fact that it exploits that the exponent is an integer. The alternative to computing such powers, std::pow(t,7), uses the std::pow function that takes the exponent as a floating point number and, because it has to cater to the complexities of the general situation, is vastly slower.

    Use this function as in fixed_power<dim> (t) or fixed_power<7> (t).

    Definition at line 942 of file utilities.h.
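
    A self-contained sketch of the recursive-doubling idea (hedged: this mirrors the description above, not the library's actual implementation):

    // Hedged sketch: compute t^N with a logarithmic number of
    // multiplications (requires C++17 for if constexpr).
    template <int N, typename T>
    T fixed_power_sketch(const T t)
    {
      if constexpr (N == 0)
        return T(1.);
      else if constexpr (N % 2 == 1)
        return t * fixed_power_sketch<N - 1>(t);        // peel off one factor
      else
        {
          const T half = fixed_power_sketch<N / 2>(t);  // square the half power
          return half * half;
        }
    }

    // e.g. fixed_power_sketch<7>(t) computes t^7 as described above.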

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 2024-11-15 06:44:27.563654909 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1LinearAlgebra.html 2024-11-15 06:44:27.563654909 +0000 @@ -289,8 +289,8 @@ VectorMemory< VectorType > & vector_memory

    Apply Chebyshev polynomial of the operator H to x. For a non-defective operator $H$ with a complete set of eigenpairs $H \psi_i = \lambda_i \psi_i$, the action of a polynomial filter $p$ is given by $p(H)x =\sum_i a_i p(\lambda_i) \psi_i$, where $x=: \sum_i a_i \psi_i$. Thus by appropriately choosing the polynomial filter, one can alter the eigenmodes contained in $x$.

    This function uses Chebyshev polynomials of the first kind. Below is an example of polynomial $T_n(x)$ of degree $n=8$ normalized to unity at $-1.2$.

    @@ -298,8 +298,8 @@

    By introducing a linear mapping $L$ from unwanted_spectrum to $[-1,1]$, we can damp the corresponding modes in x. The higher the polynomial degree $n$, the more rapidly it grows outside of $[-1,1]$. In order to avoid numerical overflow, we normalize the polynomial filter to unity at tau. Thus, the filtered operator is $p(H) = T_n(L(H))/T_n(L(\tau))$.

    The action of the Chebyshev filter only requires evaluation of vmult() of H and is based on the recursion equation for the Chebyshev polynomial of degree $n$: $T_{n}(x) = 2x T_{n-1}(x) - T_{n-2}(x)$ with $T_0(x)=1$ and $T_1(x)=x$.

    vector_memory is used to allocate memory for temporary objects.

    This function implements the algorithm (with a minor fix of sign of $\sigma_1$) from [Zhou2014].

    Note
    If tau is equal to std::numeric_limits<double>::infinity(), no normalization will be performed.
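
    A scalar illustration of the three-term recursion the filter is built on (hedged: this evaluates $T_n(x)$ for a plain double, whereas the filter applies the same recursion to vectors via vmult()):

    // Evaluate T_n(x) via T_0(x) = 1, T_1(x) = x,
    // T_n(x) = 2 x T_{n-1}(x) - T_{n-2}(x).
    double chebyshev_T(const unsigned int n, const double x)
    {
      double t_prev = 1.0; // T_0
      double t_curr = x;   // T_1
      if (n == 0)
        return t_prev;
      for (unsigned int k = 2; k <= n; ++k)
        {
          const double t_next = 2. * x * t_curr - t_prev;
          t_prev = t_curr;
          t_curr = t_next;
        }
      return t_curr;
    }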
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI.html 2024-11-15 06:44:27.607655302 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI.html 2024-11-15 06:44:27.607655302 +0000 @@ -1313,7 +1313,7 @@ const MPI_Comm comm

    For each process $p$ on a communicator with $P$ processes, compute both the (exclusive) partial sum $\sum_{i=0}^{p-1} v_i$ and the total sum $\sum_{i=0}^{P-1} v_i$, and return these two values as a pair. The former is computed via the MPI_Exscan function where the partial sum is typically called "(exclusive) scan" of the values $v_p$ provided by the individual processes. The term "prefix sum" is also used.

    This function is only available if T is a type natively supported by MPI.
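
    A sketch of the underlying operations with raw MPI (hedged: this illustrates the semantics described above, not the library's implementation):

    // Compute the exclusive prefix sum and the total sum of one value
    // per process.
    #include <mpi.h>
    #include <utility>

    std::pair<unsigned int, unsigned int>
    prefix_and_total(const unsigned int value, const MPI_Comm comm)
    {
      unsigned int prefix = 0, total = 0;
      MPI_Exscan(&value, &prefix, 1, MPI_UNSIGNED, MPI_SUM, comm);
      int rank = 0;
      MPI_Comm_rank(comm, &rank);
      if (rank == 0)
        prefix = 0; // MPI_Exscan leaves the receive buffer undefined on rank 0
      MPI_Allreduce(&value, &total, 1, MPI_UNSIGNED, MPI_SUM, comm);
      return {prefix, total};
    }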

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html 2024-11-15 06:44:27.635655552 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceUtilities_1_1MPI_1_1ConsensusAlgorithms.html 2024-11-15 06:44:27.635655552 +0000 @@ -140,7 +140,7 @@ std::vector< unsigned int > selector(const std::vector< unsigned int > &targets, const std::function< RequestType(const unsigned int)> &create_request, const std::function< void(const unsigned int, const RequestType &)> &process_request, const MPI_Comm comm)

    Detailed Description

    A namespace for algorithms that implement the task of communicating in a dynamic-sparse way. In computer science, this is often called a consensus problem.

    The problem consensus algorithms are trying to solve is this: Let's say you have $P$ processes that work together via MPI. Each (or at least some) of these want to send information to some of the other processes, or request information from other processes. No process knows which other process wants to communicate with them. The challenge is to determine who needs to talk to whom and what information needs to be sent, and to come up with an algorithm that ensures that this communication happens.

    That this is not a trivial problem can be seen by an analogy of the postal service. There, some senders may request information from some other participants in the postal service. So they send a letter that requests the information, but the recipients do not know how many such letters they need to expect (or that they should expect any at all). They also do not know how long they need to keep checking their mailbox for incoming requests. The recipients can be considered reliable, however: We can assume that everyone who is sent a request puts a letter with the answer in the mail. This time at least the recipients of these answers know that they are waiting for these answers because they have previously sent a request. They do not know in advance, however, when the answer will arrive and how long to wait. The goal of a consensus algorithm is then to come up with a strategy in which every participant can say who they want to send requests to, what that request is, and is then guaranteed an answer. The algorithm will only return when all requests by all participants have been answered and the answer delivered to the requesters.

    The problem is generally posed in terms of requests and answers. In practice, either of these two may be empty messages. For example, processes may simply want to send information to others that they know these others need; in this case, the "answer" message may be empty and its meaning is simply an affirmation that the information was received. Similarly, in some cases processes simply need to inform others that they want information, but the destination process knows what information is being requested (based on where in the program the request happens) and can send that information without there being any identifying information in the request; in that case, the request message may be empty and simply serve to identify the requester. (Each message can be queried for its sender.)
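
    A usage sketch following the selector() signature quoted in the file header above (hedged: targets and comm are assumed to exist, the request payload type is arbitrary, and the meaning of the return value beyond its std::vector<unsigned int> type is an assumption):

    // Each process sends a request to the ranks in 'targets'; recipients
    // consume it in the process_request callback.
    using RequestType = std::vector<double>;
    const std::vector<unsigned int> result =
      Utilities::MPI::ConsensusAlgorithms::selector<RequestType>(
        targets,
        [](const unsigned int target) {                 // create_request
          return RequestType{static_cast<double>(target)};
        },
        [](const unsigned int source, const RequestType &request) {
          (void)source;   // process_request: consume data from 'source'
          (void)request;
        },
        comm);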

    As mentioned in the first paragraph, the algorithms we are interested in are "dynamic-sparse":

      /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html 2024-11-15 06:44:27.743656517 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceVectorTools.html 2024-11-15 06:44:27.743656517 +0000 @@ -343,7 +343,7 @@

    •

      Projection: compute the L2-projection of the given function onto the finite element space, i.e. if f is the function to be projected, compute fh in Vh such that (fh,vh)=(f,vh) for all discrete test functions vh. This is done through the solution of the linear system of equations M v = f where M is the mass matrix $m_{ij} = \int_\Omega \phi_i(x) \phi_j(x) dx$ and $f_i = \int_\Omega f(x) \phi_i(x) dx$. The solution vector $v$ then is the nodal representation of the projection fh. The project() functions are used in the step-21 and step-23 tutorial programs.

    In order to get proper results, it may be necessary to treat boundary conditions right. Below are listed some cases where this may be needed. If needed, this is done by L2-projection of the trace of the given function onto the finite element space restricted to the boundary of the domain, then taking this information and using it to eliminate the boundary nodes from the mass matrix of the whole domain, using the MatrixTools::apply_boundary_values() function. The projection of the trace of the function to the boundary is done with the VectorTools::project_boundary_values() (see below) function, which is called with a map of boundary functions std::map<types::boundary_id, const Function<spacedim,number>*> in which all boundary indicators from zero to numbers::internal_face_boundary_id-1 (numbers::internal_face_boundary_id is used for other purposes, see the Triangulation class documentation) point to the function to be projected. The projection to the boundary takes place using a second quadrature formula on the boundary given to the project() function. The first quadrature formula is used to compute the right hand side and for numerical quadrature of the mass matrix.

    Projecting the boundary values first and then eliminating them from the global system of equations is usually not needed. It may be necessary if you want to enforce special restrictions on the boundary values of the projected function, for example in time dependent problems: you may want to project the initial values but need consistency with the boundary values for later times. Since the latter are projected onto the boundary in each time step, it is necessary that we also project the boundary values of the initial values, before projecting them to the whole domain.

      Obviously, the results of the two schemes for projection are different. Usually, when projecting to the boundary first, the L2-norm of the difference between original function and projection over the whole domain will be larger (factors of five have been observed) while the L2-norm of the error integrated over the boundary should of course be less. The reverse should also hold if no projection to the boundary is performed.
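
    A minimal sketch of the projection described above (hedged: dof_handler, constraints, fe, and the Function object f to be projected are assumed to exist; project() itself is named in the text):

    // L2-project f onto the finite element space, i.e. solve M v = f
    // with the mass matrix M assembled using the given quadrature.
    Vector<double> projection(dof_handler.n_dofs());
    VectorTools::project(dof_handler,
                         constraints,
                         QGauss<dim>(fe.degree + 1),
                         f,
                         projection);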

      @@ -406,220 +406,220 @@

    Denote which norm/integral is to be computed by the integrate_difference() function on each cell and compute_global_error() for the whole domain. Let $f:\Omega \rightarrow \mathbb{R}^c$ be a finite element function with $c$ components where component $c$ is denoted by $f_c$ and $\hat{f}$ be the reference function (the fe_function and exact_solution arguments to integrate_difference()). Let $e_c = \hat{f}_c - f_c$ be the difference or error between the two. Further, let $w:\Omega \rightarrow \mathbb{R}^c$ be the weight function of integrate_difference(), which is assumed to be equal to one if not supplied. Finally, let $p$ be the exponent argument (for $L_p$-norms).

    In the following, we denote by $E_K$ the local error computed by integrate_difference() on cell $K$, whereas $E$ is the global error computed by compute_global_error(). Note that integrals are approximated by quadrature in the usual way:

\[
 \int_A f(x) dx \approx \sum_q f(x_q) \omega_q.
\]

    Similarly for suprema over a cell $T$:

\[
 \sup_{x\in T} |f(x)| dx \approx \max_q |f(x_q)|.
\]

    Enumerator
    mean 

    The function or difference of functions is integrated on each cell $K$:

\[
   E_K
 = \int_K \sum_c (\hat{f}_c - f_c) \, w_c
 = \int_K \sum_c e_c \, w_c
\]

    and summed up to get

\[
   E = \sum_K E_K
     = \int_\Omega \sum_c (\hat{f}_c - f_c) \, w_c
\]

    or, for $w \equiv 1$:

\[
   E = \int_\Omega (\hat{f} - f)
     = \int_\Omega e.
\]

    Note: This differs from what is typically known as the mean of a function by a factor of $\frac{1}{|\Omega|}$. To compute the mean you can also use compute_mean_value(). Finally, pay attention to the sign: if $\hat{f}=0$, this will compute the negative of the mean of $f$.

    L1_norm 

    The absolute value of the function is integrated:

\[
   E_K = \int_K \sum_c |e_c| \, w_c
\]

    and

\[
   E = \sum_K E_K = \int_\Omega \sum_c |e_c| w_c,
\]

    or, for $w \equiv 1$:

\[
   E  = \| e \|_{L^1}.
\]

    L2_norm 

    The square of the function is integrated and the square root of the result is computed on each cell:

\[
   E_K = \sqrt{ \int_K \sum_c e_c^2 \, w_c }
\]

    and

\[
   E = \sqrt{\sum_K E_K^2} = \sqrt{ \int_\Omega  \sum_c e_c^2 \, w_c }
\]

    or, for $w \equiv 1$:

\[
   E = \sqrt{ \int_\Omega e^2 }
     = \| e \|_{L^2}
\]

    Lp_norm 

    The absolute value to the $p$-th power is integrated and the $p$-th root is computed on each cell. The exponent $p$ is the exponent argument of integrate_difference() and compute_mean_value():

\[
   E_K = \left( \int_K \sum_c |e_c|^p \, w_c \right)^{1/p}
\]

    and

\[
   E = \left( \sum_K E_K^p \right)^{1/p}
\]

    or, for $w \equiv 1$:

\[
   E = \| e \|_{L^p}.
\]

    Linfty_norm 

    The maximum absolute value of the function:

\[
   E_K = \sup_K \max_c |e_c| \, w_c
\]

    and

\[
   E = \max_K E_K
 = \sup_\Omega \max_c |e_c| \, w_c
\]

    or, for $w \equiv 1$:

\[
   E  = \sup_\Omega \|e\|_\infty = \| e \|_{L^\infty}.
\]

    H1_seminorm 

    L2_norm of the gradient:

\[
   E_K = \sqrt{ \int_K \sum_c (\nabla e_c)^2 \, w_c }
\]

    and

\[
   E = \sqrt{\sum_K E_K^2} = \sqrt{ \int_\Omega \sum_c (\nabla e_c)^2 \,
 w_c }
\]

    or, for $w \equiv 1$:

\[
   E = \| \nabla e \|_{L^2}.
\]

    Hdiv_seminorm 

    L2_norm of the divergence of a vector field. The function $f$ is expected to have $c \geq \text{dim}$ components and the first dim will be used to compute the divergence:

\[
   E_K = \sqrt{ \int_K \left( \sum_c \frac{\partial e_c}{\partial x_c} \,
 \sqrt{w_c} \right)^2 }
\]

    and

\[
   E = \sqrt{\sum_K E_K^2}
     = \sqrt{ \int_\Omega \left( \sum_c \frac{\partial e_c}{\partial x_c}
 \, \sqrt{w_c} \right)^2  }
\]

    or, for $w \equiv 1$:

\[
   E = \| \nabla \cdot e \|_{L^2}.
\]
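
    In practice, these norms are selected when calling integrate_difference(), and the per-cell results are combined by compute_global_error(). A usage sketch (hedged: dof_handler, solution, fe, and triangulation are assumed to exist, and ExactSolution<dim> is a hypothetical Function object):

    // Per-cell L2 errors, then the global L2 error, as defined above.
    Vector<float> cell_errors(triangulation.n_active_cells());
    VectorTools::integrate_difference(dof_handler,
                                      solution,
                                      ExactSolution<dim>(),
                                      cell_errors,
                                      QGauss<dim>(fe.degree + 1),
                                      VectorTools::L2_norm);
    const double global_error =
      VectorTools::compute_global_error(triangulation,
                                        cell_errors,
                                        VectorTools::L2_norm);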

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html 2024-11-15 06:44:27.779656838 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespacehp_1_1Refinement.html 2024-11-15 06:44:27.779656838 +0000 @@ -543,9 +543,9 @@

    Predict how the current error_indicators will adapt after refinement and coarsening were to happen on the provided dof_handler, and write its results to predicted_errors. Each entry of error_indicators and predicted_errors corresponds to an active cell on the underlying Triangulation, thus each container has to be of size Triangulation::n_active_cells(). The errors are interpreted to be measured in the energy norm; this assumption enters the rate of convergence that is used in the prediction. The $l_2$-norm of the output argument predicted_errors corresponds to the predicted global error after adaptation.

    For p-adaptation, the local error is expected to converge exponentially with the polynomial degree of the assigned finite element. Each increase or decrease of the degree will thus change its value by a user-defined control parameter gamma_p.

    For h-adaptation, we expect the local error $\eta_K$ on cell $K$ to be proportional to $(h_K)^{p_K}$ in the energy norm, where $h_K$ denotes the cell diameter and $p_K$ the polynomial degree of the currently assigned finite element on cell $K$.

    During h-coarsening, the finite elements on siblings may be different, and their parent cell will be assigned to their least dominating finite element that belongs to its most general child. Thus, we will always interpolate on an enclosing finite element space. Additionally assuming that the finite elements on the cells to be coarsened are sufficient to represent the solution correctly (e.g. at least quadratic basis functions for a quadratic solution), we are confident to say that the error will not change by sole interpolation on the larger finite element space.

    For p-adaptation, the local error is expected to converge exponentially with the polynomial degree of the assigned finite element. Each increase or decrease of the degree will thus change its value by a user-defined control parameter gamma_p. The assumption of exponential convergence is only valid if both h- and p-adaptive methods are combined in a sense that they are both utilized throughout a mesh, but do not have to be applied both on a cell simultaneously.

    The prediction algorithm is formulated as follows with control parameters gamma_p, gamma_h and gamma_n that may be used to influence prediction for each adaptation type individually. The results for each individual cell are stored in the predicted_errors output argument.

    @@ -567,7 +567,7 @@

    On the basis of the refinement history, we use the predicted error estimates to decide how cells will be adapted in the next adaptation step. Comparing the predicted error from the previous adaptation step to the error estimates of the current step allows us to assess whether our previous choice of adaptation was justified, and lets us decide how to adapt in the next one.

    We thus have to transfer the predicted error from the old to the adapted mesh. When transferring the predicted error to the adapted mesh, make sure to configure your CellDataTransfer object with AdaptationStrategies::Refinement::l2_norm() as a refinement strategy and AdaptationStrategies::Coarsening::l2_norm() as a coarsening strategy. This ensures that the $l_2$-norm of the predicted errors is preserved on both meshes.

    In this context, we assume that the local error on a cell to be h-refined will be divided equally on all of its $n_{K_c}$ children, whereas local errors on siblings will be summed up on the parent cell in case of h-coarsening. This assumption is often not satisfied in practice: For example, if a cell is at a corner singularity, then the one child cell that ends up closest to the singularity will inherit the majority of the remaining error – but this function can not know where the singularity will be, and consequently assumes equal distribution.

    Incorporating the transfer from the old to the adapted mesh, the complete error prediction algorithm reads as follows:
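
    At the usage level, a sketch of invoking the prediction (hedged: the function name hp::Refinement::predict_error and the control-parameter values shown are assumptions, since the signature is not visible in this diff; dof_handler, triangulation, and error_indicators are assumed to exist):

    // Predict post-adaptation errors from the current indicators; both
    // vectors have one entry per active cell.
    Vector<float> predicted_errors(triangulation.n_active_cells());
    hp::Refinement::predict_error(dof_handler,
                                  error_indicators,
                                  predicted_errors,
                                  /*gamma_p=*/std::sqrt(0.4),
                                  /*gamma_h=*/2.,
                                  /*gamma_n=*/1.);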

    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html 2024-11-15 06:44:27.895657875 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceinternal.html 2024-11-15 06:44:27.899657910 +0000 @@ -949,8 +949,8 @@
    const double coordinate_value

    Creates a (dim + 1)-dimensional point by copying over the coordinates of the incoming dim-dimensional point and setting the "missing" (dim + 1)-dimensional component to the incoming coordinate value.

    For example, given the input $\{(x, y), 2, z \}$ this function creates the point $(x, y, z)$.

    The coordinates of the dim-dimensional point are written to the coordinates of the (dim + 1)-dimensional point in the order of the convention given by the function coordinate_to_one_dim_higher. Thus, the order of coordinates on the lower-dimensional point is not preserved: $\{(z, x), 1, y \}$ creates the point $(x, y, z)$.

    Definition at line 23 of file function_restriction.cc.

    @@ -2594,7 +2594,7 @@

    Compute the polynomial interpolation of a tensor product shape function $\varphi_i$ given a vector of coefficients $u_i$ in the form $u_h(\mathbf{x}) = \sum_{i=1}^{k^d} \varphi_i(\mathbf{x}) u_i$. The shape functions $\varphi_i(\mathbf{x}) = \prod_{d=1}^{\text{dim}}\varphi_{i_d}^\text{1d}(x_d)$ represent a tensor product. The function returns a pair with the value of the interpolation as the first component and the gradient in reference coordinates as the second component. Note that for compound types (e.g. the values field being a Point<spacedim> argument), the components of the gradient are sorted as Tensor<1, dim, Tensor<1, spacedim>> with the derivatives as the first index; this is a consequence of the generic arguments in the function.

    Parameters
    /usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html 2024-11-15 06:44:27.927658160 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/namespaceparallel.html 2024-11-15 06:44:27.927658160 +0000 @@ -396,7 +396,7 @@
    const unsigned int grainsize

    This function works a lot like the apply_to_subranges() function, but it allows accumulating numerical results computed on each subrange into one number. The type of this number is given by the ResultType template argument that needs to be explicitly specified, and results are added up (i.e., the reduction of results from subranges happens by adding up these results).

    An example of use of this function is to compute the value of the expression $x^T A x$ for a square matrix $A$ and a vector $x$. The sum over rows can be parallelized and the whole code might look like this:

    double matrix_norm (const FullMatrix &A,
                        const Vector &x)
    {
      return
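        // A plausible continuation (hedged sketch: the diff truncates the
        // original example here, so the lambda body, the row count A.m(),
        // and the grain size 200 are assumptions):
        std::sqrt(
          parallel::accumulate_from_subranges<double>(
            [&](const unsigned int begin_row, const unsigned int end_row) {
              double partial_sum = 0; // contribution of rows [begin_row, end_row)
              for (unsigned int row = begin_row; row != end_row; ++row)
                for (unsigned int col = 0; col < x.size(); ++col)
                  partial_sum += x(row) * A(row, col) * x(col);
              return partial_sum;
            },
            0u,
            static_cast<unsigned int>(A.m()), // number of rows of A
            200u));
    }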
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html 2024-11-15 06:44:27.955658410 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_1.html 2024-11-15 06:44:27.955658410 +0000 @@ -358,7 +358,7 @@

    This program obviously does not have a whole lot of functionality, but in particular the second_grid function has a bunch of places where you can play with it. For example, you could modify the criterion by which we decide which cells to refine. An example would be to change the condition to this:

    if (cell->center()[1] > 0)
    cell->set_refine_flag ();

    This would refine all cells for which the $y$-coordinate of the cell's center is greater than zero (the TriaAccessor::center function that we call by dereferencing the cell iterator returns a Point<2> object; subscripting [0] would give the $x$-coordinate, subscripting [1] the $y$-coordinate). By looking at the functions that TriaAccessor provides, you can also use more complicated criteria for refinement.
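
    A hedged variation on this idea (assuming the usual step-1 triangulation object; the radius 0.3 is arbitrary): refine all cells whose center lies inside a disk around the origin:

    for (auto &cell : triangulation.active_cell_iterators())
      if (cell->center().norm() < 0.3)
        cell->set_refine_flag();
    triangulation.execute_coarsening_and_refinement();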

    In general, what you can do with operations of the form cell->something() is a bit difficult to find in the documentation because cell is not a pointer but an iterator. The functions you can call on a cell can be found in the documentation of the classes TriaAccessor (which has functions that can also be called on faces of cells or, more generally, all sorts of geometric objects that appear in a triangulation), and CellAccessor (which adds a few functions that are specific to cells).

    A more thorough description of the whole iterator concept can be found in the Iterators on mesh-like containers documentation topic.

    Different geometries

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html 2024-11-15 06:44:27.983658660 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_10.html 2024-11-15 06:44:27.983658660 +0000 @@ -123,11 +123,11 @@
  • The plain program
  • Introduction

    This is a rather short example which only shows some aspects of using higher order mappings. By mapping we mean the transformation between the unit cell (i.e. the unit line, square, or cube) to the cells in real space. In all the previous examples, we have implicitly used linear or d-linear mappings; you will not have noticed this at all, since this is what happens if you do not do anything special. However, if your domain has curved boundaries, there are cases where the piecewise linear approximation of the boundary (i.e. by straight line segments) is not sufficient, and you want that your computational domain is an approximation to the real domain using curved boundaries as well. If the boundary approximation uses piecewise quadratic parabolas to approximate the true boundary, then we say that this is a quadratic or $Q_2$ approximation. If we use piecewise graphs of cubic polynomials, then this is a $Q_3$ approximation, and so on.

    For some differential equations, it is known that piecewise linear approximations of the boundary, i.e. $Q_1$ mappings, are not sufficient if the boundary of the exact domain is curved. Examples are the biharmonic equation using $C^1$ elements, or the Euler equations of gas dynamics on domains with curved reflective boundaries. In these cases, it is necessary to compute the integrals using a higher order mapping. If we do not use such a higher order mapping, the order of approximation of the boundary dominates the order of convergence of the entire numerical scheme, irrespective of the order of convergence of the discretization in the interior of the domain.

    Rather than demonstrating the use of higher order mappings with one of these more complicated examples, we do only a brief computation: calculating the value of $\pi=3.141592653589793238462643\ldots$ by two different methods.

    The first method uses a triangulated approximation of the circle with unit radius and integrates a unit magnitude constant function ( $f = 1$) over it. Of course, if the domain were the exact unit circle, then the area would be $\pi$, but since we only use an approximation by piecewise polynomial segments, the value of the area we integrate over is not exactly $\pi$. However, it is known that as we refine the triangulation, a $Q_p$ mapping approximates the boundary with an order $h^{p+1}$, where $h$ is the mesh size. We will check the values of the computed area of the circle and their convergence towards $\pi$ under mesh refinement for different mappings. We will also find a convergence behavior that is surprising at first, but has a good explanation.

    The second method works similarly, but this time does not use the area of the triangulated unit circle, but rather its perimeter. $\pi$ is then approximated by half of the perimeter, as we choose the radius equal to one.

    Note
    This tutorial shows in essence how to choose a particular mapping for integrals, by attaching a particular geometry to the triangulation (as had already been done in step-1, for example) and then passing a mapping argument to the FEValues class that is used for all integrals in deal.II. The geometry we choose is a circle, for which deal.II already has a class (SphericalManifold) that can be used. If you want to define your own geometry, for example because it is complicated and cannot be described by the classes already available in deal.II, you will want to read through step-53.

    The commented program

    The first of the following include files are probably well-known by now and need no further explanation.

    @@ -169,7 +169,7 @@

    Then alternate between generating output on the current mesh for $Q_1$, $Q_2$, and $Q_3$ mappings, and (at the end of the loop body) refining the mesh once globally.

      for (unsigned int refinement = 0; refinement < 2; ++refinement)
      {
      std::cout << "Refinement level: " << refinement << std::endl;
    @@ -207,9 +207,9 @@
      }
     

    Now we proceed with the main part of the code, the approximation of $\pi$. The area of a circle is of course given by $\pi r^2$, so having a circle of radius 1, the area represents just the number that is searched for. The numerical computation of the area is performed by integrating the constant function of value 1 over the whole computational domain, i.e. by computing the areas $\int_K 1 dx=\int_{\hat K} 1 \ \textrm{det}\ J(\hat x) d\hat x \approx \sum_i \textrm{det} \ J(\hat x_i)w(\hat x_i)$, where the sum extends over all quadrature points on all active cells in the triangulation, with $w(x_i)$ being the weight of quadrature point $x_i$. The integrals on each cell are approximated by numerical quadrature, hence the only additional ingredient we need is to set up a FEValues object that provides the corresponding JxW values of each cell. (Note that JxW is meant to abbreviate Jacobian determinant times weight; since in numerical quadrature the two factors always occur at the same places, we only offer the combined quantity, rather than two separate ones.) We note that here we won't use the FEValues object in its original purpose, i.e. for the computation of values of basis functions of a specific finite element at certain quadrature points. Rather, we use it only to gain the JxW at the quadrature points, irrespective of the (dummy) finite element we will give to the constructor of the FEValues object. The actual finite element given to the FEValues object is not used at all, so we could give any.

      template <int dim>
      void compute_pi_by_area()
      {
    @@ -245,7 +245,7 @@
     

    We employ an object of the ConvergenceTable class to store all important data like the approximated values for $\pi$ and the error with respect to the true value of $\pi$. We will also use functions provided by the ConvergenceTable class to compute convergence rates of the approximations to $\pi$.

      ConvergenceTable table;
     
    @@ -294,7 +294,7 @@
      }
     
     

    The following, second function also computes an approximation of $\pi$ but this time via the perimeter $2\pi r$ of the domain instead of the area. This function is only a variation of the previous function. So we will mainly give documentation for the differences.

      template <int dim>
      void compute_pi_by_perimeter()
      {
    @@ -417,11 +417,11 @@
    unset ytics
    plot [-1:1][-1:1] "ball_0_mapping_q_1.dat" lw 4 lt rgb "black"

    or using one of the other filenames. The second line makes sure that the aspect ratio of the generated output is actually 1:1, i.e. a circle is drawn as a circle on your screen, rather than as an ellipse. The third line switches off the key in the graphic, as that will only print information (the filename) which is not that important right now. Similarly, the fourth and fifth disable tick marks. The plot is then generated with a specific line width ("lw", here set to 4) and line type ("lt", here chosen by saying that the line should be drawn using the RGB color "black").

    The following table shows the triangulated computational domain for $Q_1$, $Q_2$, and $Q_3$ mappings, for the original coarse grid (left), and a once uniformly refined grid (right).

    Five-cell discretization of the disk.
    20-cell discretization of the disk (i.e., five cells refined once).
    Five-cell discretization of the disk with quadratic edges. The boundary is nearly indistinguishable from the actual circle.
    20-cell discretization with quadratic edges.
    Five-cell discretization of the disk with cubic edges. The boundary is nearly indistinguishable from the actual circle.
    20-cell discretization with cubic edges.

    These pictures show the obvious advantage of higher order mappings: they approximate the true boundary quite well also on rather coarse meshes. To demonstrate this a little further, here is part of the upper right quarter circle of the coarse meshes with $Q_2$ and $Q_3$ mappings, where the dashed red line marks the actual circle:

    Close-up of quadratic discretization. The distance between the quadratic interpolant and the actual circle is small.
    Close-up of cubic discretization. The distance between the cubic interpolant and the actual circle is very small.

    Obviously the quadratic mapping approximates the boundary quite well, while for the cubic mapping the difference between the approximated domain and the true one is hardly visible even on the coarse grid. You can also see that the mapping only changes something at the outer boundaries of the triangulation. In the interior, all lines are still represented by linear functions, resulting in additional computations only on cells at the boundary. Higher order mappings are therefore usually not noticeably slower than lower order ones, because the additional computations are only performed on a small subset of all cells.

    @@ -513,15 +513,15 @@
    1280 3.1415926535897896 3.5527e-15 3.32
    5120 3.1415926535897940 8.8818e-16 2.00
    -
    Note
    Once the error reaches a level on the order of $10^{-13}$ to $10^{-15}$, it is essentially dominated by round-off and consequently dominated by what precisely the library is doing in internal computations. Since these things change, the precise values and errors change from release to release at these round-off levels, though the overall order of errors should of course remain the same. See also the comment below in the section on Possibilities for extensions about how to compute these results more accurately.
    -

    One of the immediate observations from the output above is that in all cases the values converge quickly to the true value of $\pi=3.141592653589793238462643$. Note that for the $Q_4$ mapping, we are already in the regime of roundoff errors and the convergence rate levels off, which is already quite a lot. However, also note that for the $Q_1$ mapping, even on the finest grid the accuracy is significantly worse than on the coarse grid for a $Q_3$ mapping!

    -

    The last column of the output shows the convergence order, in powers of the mesh width $h$. In the introduction, we had stated that the convergence order for a $Q_p$ mapping should be $h^{p+1}$. However, in the example shown, the order is rather $h^{2p}$! This at first surprising fact is explained by the properties of the $Q_p$ mapping. At order p, it uses support points that are based on the p+1 point Gauss-Lobatto quadrature rule that selects the support points in such a way that the quadrature rule converges at order 2p. Even though these points are here only used for interpolation of a pth order polynomial, we get a superconvergence effect when numerically evaluating the integral, resulting in the observed high order of convergence. (This effect is also discussed in detail in the following publication: A. Bonito, A. Demlow, and J. Owen: "A priori error +

    Note
    Once the error reaches a level on the order of $10^{-13}$ to $10^{-15}$, it is essentially dominated by round-off and consequently dominated by what precisely the library is doing in internal computations. Since these things change, the precise values and errors change from release to release at these round-off levels, though the overall order of errors should of course remain the same. See also the comment below in the section on Possibilities for extensions about how to compute these results more accurately.

    One of the immediate observations from the output above is that in all cases the values converge quickly to the true value of $\pi=3.141592653589793238462643$. Note that for the $Q_4$ mapping, we are already in the regime of roundoff errors and the convergence rate levels off, which is already quite a lot. However, also note that for the $Q_1$ mapping, even on the finest grid the accuracy is significantly worse than on the coarse grid for a $Q_3$ mapping!


The last column of the output shows the convergence order, in powers of the mesh width $h$. In the introduction, we had stated that the convergence order for a $Q_p$ mapping should be $h^{p+1}$. However, in the example shown, the order is rather $h^{2p}$! This at first surprising fact is explained by the properties of the $Q_p$ mapping. At order $p$, it uses support points that are based on the $p+1$ point Gauss-Lobatto quadrature rule that selects the support points in such a way that the quadrature rule converges at order $2p$. Even though these points are here only used for interpolation of a $p$th order polynomial, we get a superconvergence effect when numerically evaluating the integral, resulting in the observed high order of convergence. (This effect is also discussed in detail in the following publication: A. Bonito, A. Demlow, and J. Owen: "A priori error estimates for finite element approximations to eigenvalues and eigenfunctions of the Laplace-Beltrami operator", submitted, 2018.)
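
The rates in this column are presumably obtained by comparing the errors on two consecutive meshes: each refinement step splits every cell into four, so $h$ halves, and

\[
  \text{order} = \log_2 \frac{e_h}{e_{h/2}},
  \qquad\text{e.g.}\qquad
  \log_2 \frac{3.5527\cdot 10^{-15}}{8.8818\cdot 10^{-16}} \approx 2.00
\]

for the last row of the table above.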

    Possibilities for extensions

    As the table of numbers copied from the output of the program shows above, it is not very difficult to compute the value of $\pi$ to 13 or 15 digits. But, the output also shows that once we approach the level of accuracy with which double precision numbers store information (namely, with roughly 16 digits of accuracy), we no longer see the expected convergence order and the error no longer decreases with mesh refinement as anticipated. This is because both within this code and within the many computations that happen within deal.II itself, each operation incurs an error on the order of $10^{-16}$; adding such errors many times over then results in an error that may be on the order of $10^{-14}$, which will dominate the discretization error after a number of refinement steps and consequently destroy the convergence rate.

    The question is whether one can do anything about this. One thought is to use a higher-precision data type. For example, one could think of declaring both the area and perimeter variables in compute_pi_by_area() and compute_pi_by_perimeter() with data type long double. long double is a data type that is not well specified in the C++ standard but at least on Intel processors has around 19, instead of around 16, digits of accuracy. If we were to do that, we would get results that differ from the ones shown above. However, maybe counter-intuitively, they are not uniformly better. For example, when computing $\pi$ by the area, at the time of writing these sentences we get these values with double precision for degree 4:

    5 3.1415871927401144 5.4608e-06 -
    20 3.1415926314742491 2.2116e-08 7.95
    80 3.1415926535026268 8.7166e-11 7.99
    320 3.1415926535894005 3.9257e-13 7.79
    @@ -534,7 +534,7 @@
    320 3.1415926535894516 3.4157e-13 8.00
    1280 3.1415926535897918 1.5339e-15 7.80
    5120 3.1415926535897927 5.2649e-16 1.54
    Indeed, here we get results that are approximately 50 times as accurate. On the other hand, when computing $\pi$ by the perimeter, we get this with double precision:

    5 3.1415921029432572 5.5065e-07 -
    20 3.1415926513737582 2.2160e-09 7.96
    80 3.1415926535810699 8.7232e-12 7.99
    320 3.1415926535897576 3.5527e-14 7.94
    @@ -546,7 +546,7 @@
    320 3.1415926535897576 3.5705e-14 7.93
    1280 3.1415926535897918 1.3785e-15 4.70
    5120 3.1415926535897944 1.3798e-15 -0.00
    Here, using double precision is more accurate by about a factor of two. (Of course, in all cases, we have computed $\pi$ with more accuracy than any engineer would ever want to know.)

What explains this unpredictability? In general, round-off errors can be thought of as random, and add up in ways that are not worth thinking too much about; we should therefore always treat any accuracy beyond, say, thirteen digits as suspect. Thus, it is probably not worth spending too much time on wondering why we get different winners and losers in the data type exchange from double and long double. The accuracy of the results is also largely not determined by the precision of the data type in which we accumulate each cell's (or face's) contributions, but by the accuracy of what deal.II gives us via FEValues::JxW() and FEFaceValues::JxW(), which always uses double precision and which we cannot directly affect.

But there are cases where one can do something about the precision, and it is worth at least mentioning the name of the most well-known algorithm in this area. Specifically, when we add contributions into the area and perimeter values, we are adding together positive numbers. In general, the round-off errors associated with each of these numbers are random, and if we add up contributions of substantially different sizes, then we will likely be dominated by the error in the largest contributions. One can avoid this by adding up numbers sorted by their size, and this may then result in marginally more accurate end results. The algorithm that implements this is typically called Kahan's summation algorithm. While one could play with it in the current context, it is likely not going to improve the accuracy in ways that will truly matter.
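
For reference, here is a minimal sketch of Kahan's (compensated) summation; kahan_sum is a hypothetical helper, not part of the tutorial program:

  #include <vector>

  double kahan_sum(const std::vector<double> &values)
  {
    double sum          = 0.;
    double compensation = 0.; // running estimate of the lost low-order bits
    for (const double v : values)
      {
        const double y = v - compensation; // re-inject previously lost bits
        const double t = sum + y;          // low-order bits of y may be lost here
        compensation   = (t - sum) - y;    // algebraically zero; numerically the loss
        sum            = t;
      }
    return sum;
  }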

    The plain program

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html 2024-11-15 06:44:28.015658946 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_11.html 2024-11-15 06:44:28.015658946 +0000 @@ -123,55 +123,55 @@

    Introduction

    The problem we will be considering is the solution of Laplace's problem with Neumann boundary conditions only:

\begin{eqnarray*}
  -\Delta u &=& f \qquad \mathrm{in}\ \Omega,
  \\
  \partial_n u &=& g \qquad \mathrm{on}\ \partial\Omega.
\end{eqnarray*}

    It is well known that if this problem is to have a solution, then the forces need to satisfy the compatibility condition

\[
  \int_\Omega f\; dx + \int_{\partial\Omega} g\; ds = 0.
\]

    We will consider the special case that $\Omega$ is the circle of radius 1 around the origin, and $f=-2$, $g=1$. This choice satisfies the compatibility condition.

    The compatibility condition allows a solution of the above equation, but it nevertheless retains an ambiguity: since only derivatives of the solution appear in the equations, the solution is only determined up to a constant. For this reason, we have to pose another condition for the numerical solution, which fixes this constant.

    For this, there are various possibilities:

1. Fix one node of the discretization to zero or any other fixed value. This amounts to an additional condition $u_h(x_0)=0$. Although this is common practice, it is not necessarily a good idea, since we know that the solutions of Laplace's equation are only in $H^1$, which does not allow for the definition of point values because it is not a subset of the continuous functions. Therefore, even though fixing one node is allowed for discretized functions, it is not for continuous functions, and one can often see this in a resulting error spike at this point in the numerical solution.

2. Fixing the mean value over the domain to zero or any other value. This is allowed on the continuous level, since $H^1(\Omega)\subset L^1(\Omega)$ by Sobolev's inequality, and thus also on the discrete level since we there only consider subsets of $H^1$.

3. Fixing the mean value over the boundary of the domain to zero or any other value. This is also allowed on the continuous level, since $H^{1/2}(\partial\Omega)\subset L^1(\partial\Omega)$, again by Sobolev's inequality.

    We will choose the last possibility, since we want to demonstrate another technique with it.

While this describes the problem to be solved, we still have to figure out how to implement it. Basically, except for the additional mean value constraint, we have solved this problem several times, using Dirichlet boundary values, and we only need to drop the treatment of Dirichlet boundary nodes. The use of higher order mappings is also rather trivial and will be explained at the various places where we use it; in almost all conceivable cases, you will only consider the objects describing mappings as a black box which you need not worry about, because their only use seems to be getting passed to places deep inside the library where functions know how to handle them (i.e. in the FEValues classes and their descendants).

    The tricky point in this program is the use of the mean value constraint. Fortunately, there is a class in the library which knows how to handle such constraints, and we have used it quite often already, without mentioning its generality. Note that if we assume that the boundary nodes are spaced equally along the boundary, then the mean value constraint

\[
  \int_{\partial \Omega} u(x) \; ds = 0
\]

    can be written as

\[
  \sum_{i\in\partial\Omega_h} u_i = 0,
\]

where the sum shall run over all degree of freedom indices which are located on the boundary of the computational domain. Let us denote by $i_0$ that index on the boundary with the lowest number (or any other conveniently chosen index), then the constraint can also be represented by

\[
  u_{i_0} = \sum_{i\in\partial\Omega_h\backslash i_0} -u_i.
\]

    This, luckily, is exactly the form of constraints for which the AffineConstraints class was designed. Note that we have used this class in several previous examples for the representation of hanging nodes constraints, which also have this form: there, the middle vertex shall have the mean of the values of the adjacent vertices. In general, the AffineConstraints class is designed to handle affine constraints of the form

\[
  CU = b
\]

    where $C$ denotes a matrix, $b$ denotes a vector, and $U$ the vector of nodal values. In this case, since $C$ represents one homogeneous constraint, $b$ is the zero vector.

    In this example, the mean value along the boundary allows just such a representation, with $C$ being a matrix with just one row (i.e. there is only one constraint). In the implementation, we will create an AffineConstraints object, add one constraint (i.e. add another row to the matrix) referring to the first boundary node $i_0$, and insert the weights with which all the other nodes contribute, which in this example happens to be just $-1$.
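
In code, constructing this one-row constraint matrix might look like the following sketch (assuming a DoFHandler<dim> dof_handler that has already been set up):

  AffineConstraints<double> mean_value_constraints;

  const IndexSet boundary_dofs = DoFTools::extract_boundary_dofs(dof_handler);
  const types::global_dof_index first_boundary_dof = *boundary_dofs.begin();

  // One constraint row for u_{i_0}, with weight -1 for every other
  // boundary degree of freedom, exactly as in the formula above:
  mean_value_constraints.add_line(first_boundary_dof);
  for (const types::global_dof_index i : boundary_dofs)
    if (i != first_boundary_dof)
      mean_value_constraints.add_entry(first_boundary_dof, i, -1);
  mean_value_constraints.close();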

    Later, we will use this object to eliminate the first boundary node from the linear system of equations, reducing it to one which has a solution without the ambiguity of the constant shift value. One of the problems of the implementation will be that the explicit elimination of this node results in a number of additional elements in the matrix, of which we do not know in advance where they are located and how many additional entries will be in each of the rows of the matrix. We will show how we can use an intermediate object to work around this problem.

    But now on to the implementation of the program solving this problem...

    The commented program

    @@ -337,7 +337,7 @@

    That's quite simple, right?

Two remarks are in order, though: First, these functions are used in a lot of contexts. Maybe you want to create a Laplace or mass matrix for a vector-valued finite element; or you want to use the default Q1 mapping; or you want to assemble the matrix with a coefficient in the Laplace operator. For this reason, there are quite a large number of variants of these functions in the MatrixCreator and MatrixTools namespaces. Whenever you need a slightly different version of these functions than the ones called above, it is certainly worthwhile to take a look at the documentation and to check whether something fits your needs.
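
As one concrete instance, the calls discussed here might look like this sketch (variable names such as mapping, dof_handler, system_matrix, system_rhs, and gauss_degree are assumed from the surrounding program; $f=-2$ as stated in the introduction):

  MatrixCreator::create_laplace_matrix(mapping,
                                       dof_handler,
                                       QGauss<dim>(gauss_degree),
                                       system_matrix);
  VectorTools::create_right_hand_side(mapping,
                                      dof_handler,
                                      QGauss<dim>(gauss_degree),
                                      Functions::ConstantFunction<dim>(-2),
                                      system_rhs);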

The second remark concerns the quadrature formula we use: we want to integrate over bilinear shape functions, so we know that we have to use at least an order two Gauss quadrature formula. On the other hand, we want the quadrature rule to have at least the order of the boundary approximation. Since the order of a Gauss rule with $r$ points is $2r-1$, and the order of the boundary approximation using polynomials of degree $p$ is $p+1$, we know that $2r \geq p$. Since $r$ has to be an integer and (as mentioned above) has to be at least $2$, this makes up for the formula above computing gauss_degree.
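
Read as code, that computation could look like the following sketch (the mapping degree plays the role of $p$ above):

  const unsigned int p = mapping.get_degree();
  const unsigned int gauss_degree =
    std::max(static_cast<unsigned int>(std::ceil((p + 1) / 2.)), 2U);
  const QGauss<dim>     quadrature(gauss_degree);
  const QGauss<dim - 1> face_quadrature(gauss_degree);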

    Since the generation of the body force contributions to the right hand side vector was so simple, we do that all over again for the boundary forces as well: allocate a vector of the right size and call the right function. The boundary function has constant values, so we can generate an object from the library on the fly, and we use the same quadrature formula as above, but this time of lower dimension since we integrate over faces now instead of cells:

      Vector<double> tmp(system_rhs.size());
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html 2024-11-15 06:44:28.063659375 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_12.html 2024-11-15 06:44:28.063659375 +0000 @@ -171,7 +171,7 @@

    When summing this expression over all cells $T$, the boundary integral is done over all internal and external faces and as such there are three cases:

1. outer boundary on the inflow (we replace $u_h$ by given $g$): $\int_{\Gamma_-} v_h g \beta \cdot n$
2. outer boundary on the outflow: $\int_{\Gamma_+} v_h u_h \beta \cdot n$

@@ -189,7 +189,7 @@

      Here, $\mathbb T_h$ is the set of all active cells of the triangulation and $\mathbb F_h^i$ is the set of all active interior faces. This formulation is known as the upwind discontinuous Galerkin method.

      In order to implement this bilinear form, we need to compute the cell terms (first sum) using the usual way to achieve integration on a cell, the interface terms (second sum) using FEInterfaceValues, and the boundary terms (the other two terms). The summation of all those is done by MeshWorker::mesh_loop().

      The test problem

We solve the advection equation on $\Omega=[0,1]^2$ with ${\mathbf \beta}=\frac{1}{|x|}(-x_2, x_1)$ representing a circular counterclockwise flow field, and $g=1$ on ${\bf x}\in\Gamma_-^1 := [0,0.5]\times\{0\}$ and $g=0$ on ${\bf x}\in \Gamma_-\setminus \Gamma_-^1$.

      We solve on a sequence of meshes by refining the mesh adaptively by estimating the norm of the gradient on each cell. After solving on each mesh, we output the solution in vtk format and compute the $L^\infty$ norm of the solution. As the exact solution is either 0 or 1, we can measure the magnitude of the overshoot of the numerical solution with this.

      The commented program

      @@ -840,8 +840,8 @@

In refinement iteration 5, the image can no longer be plotted in a reasonable way as a 3d plot. We thus show a color plot with a range of $[-1,2]$ (the solution values of the exact solution lie in $[0,1]$, of course). In any case, it is clear that the continuous Galerkin solution exhibits oscillatory behavior that gets worse and worse as the mesh is refined more and more.

      There are a number of strategies to stabilize the cG method, if one wants to use continuous elements for some reason. Discussing these methods is beyond the scope of this tutorial program; an interested reader could, for example, take a look at step-31.

      Possibilities for extensions

Given that the exact solution is known in this case, one interesting avenue for further extensions would be to confirm the order of convergence for this program. In the current case, the solution is non-smooth, and so we cannot expect to get a particularly high order of convergence, even if we used higher order elements. But even if the solution is smooth, the equation is not elliptic and so it is not immediately clear that we should obtain a convergence order that equals that of the optimal interpolation estimates (i.e. for example that we would get $h^3$ convergence in the $L^2$ norm by using quadratic elements).

      In fact, for hyperbolic equations, theoretical predictions often indicate that the best one can hope for is an order one half below the interpolation estimate. For example, for the streamline diffusion method (an alternative method to the DG method used here to stabilize the solution of the transport equation), one can prove that for elements of degree $p$, the order of convergence is $p+\frac 12$ on arbitrary meshes. While the observed order is frequently $p+1$ on uniformly refined meshes, one can construct so-called Peterson meshes on which the worse theoretical bound is actually attained. This should be relatively simple to verify, for example using the VectorTools::integrate_difference function.
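
A hedged sketch of such a check, assuming a Function<dim> exact_solution were available:

  Vector<float> difference_per_cell(triangulation.n_active_cells());
  VectorTools::integrate_difference(dof_handler,
                                    solution,
                                    exact_solution,
                                    difference_per_cell,
                                    QGauss<dim>(fe.degree + 2),
                                    VectorTools::L2_norm);
  const double L2_error =
    VectorTools::compute_global_error(triangulation,
                                      difference_per_cell,
                                      VectorTools::L2_norm);
  // Comparing L2_error on two consecutive (say, uniformly refined)
  // meshes gives the observed order as log2(error_coarse/error_fine).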

      A different direction is to observe that the solution of transport problems often has discontinuities and that therefore a mesh in which we bisect every cell in every coordinate direction may not be optimal. Rather, a better strategy would be to only cut cells in the direction parallel to the discontinuity. This is called anisotropic mesh refinement and is the subject of step-30.

      The plain program

      /* ------------------------------------------------------------------------
      /usr/share/doc/packages/dealii/doxygen/deal.II/step_13.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_13.html 2024-11-15 06:44:28.123659911 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_13.html 2024-11-15 06:44:28.123659911 +0000 @@ -187,7 +187,7 @@

Once you have worked through the program, you will remark that it is already somewhat complex in its structure. Nevertheless, it only has about 850 lines of code, without comments. In real applications, there would of course be comments and class documentation, which would bring that to maybe 1200 lines. Yet, compared to the applications listed above, this is still small, as they are 20 to 25 times as large. For programs of that size, a proper design right from the start is thus indispensable. Otherwise, it will have to be redesigned at one point in its life, once it becomes too large to be manageable.

Despite this, all three programs listed above have undergone major revisions, or even rewrites. The wave program, for example, was once entirely torn apart when it was still significantly smaller, just to assemble it again in a more modular form. By that time, it had become impossible to add functionality without affecting older parts of the code (the main problem with the code was the data flow: in time-dependent applications, the major concern is when to store data to disk and when to reload it again; if this is not done in an organized fashion, then you end up with data released too early, loaded too late, or not released at all). Although the present example program thus draws from several years of experience, it is certainly not without flaws in its design, and in particular might not be suited for an application where the objective is different. It should serve as an inspiration for writing your own application in a modular way, to avoid the pitfalls of too closely coupled codes.

      What the program does

What the program actually does is not even the main point of this program; the structure of the program is more important. However, in a few words, a description would be: solve the Laplace equation for a given right hand side such that the solution is the function $u(x,y)=\exp(x+\sin(10y+5x^2))$. The goal of the computation is to get the value of the solution at the point $x_0=(0.5,0.5)$, and to compare the accuracy with which we resolve this value for two refinement criteria, namely global refinement and refinement by the error indicator by Kelly et al. which we have already used in previous examples.
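
The point evaluation itself can be sketched in a couple of lines (names are assumptions; VectorTools::point_value interpolates the discrete solution at an arbitrary point):

  const Point<2> x0(0.5, 0.5);
  const double   point_value =
    VectorTools::point_value(dof_handler, solution, x0);
  // |u(x0) - point_value| is then the error compared for the
  // two refinement criteria.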

      The results will, as usual, be discussed in the respective section of this document. In doing so, we will find a slightly irritating observation about the relative performance of the two refinement criteria. In a later example program, building atop this one, we will devise a different method that should hopefully perform better than the techniques discussed here.

      So much now for all the theoretical and anecdotal background. The best way of learning about a program is to look at it, so here it is:

      The commented program

      @@ -934,7 +934,7 @@

      Equation data

      As this is one more academic example, we'd like to compare exact and computed solution against each other. For this, we need to declare function classes representing the exact solution (for comparison and for the Dirichlet boundary values), as well as a class that denotes the right hand side of the equation (this is simply the Laplace operator applied to the exact solution we'd like to recover).

For this example, let us choose as exact solution the function $u(x,y)=\exp(x+\sin(10y+5x^2))$. In more than two dimensions, simply repeat the sine-factor with $y$ replaced by $z$ and so on. Given this, the following two classes are probably straightforward from the previous examples.

        template <int dim>
        class Solution : public Function<dim>
        {
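  public:
    // What follows is a hedged sketch, not the verbatim tutorial code,
    // of the member the listing elides here: in 2d it evaluates
    // u(x,y) = exp(x + sin(10y + 5x^2)), and in higher dimensions the
    // sine factor is repeated with y replaced by z and so on, as
    // described in the text above.
    virtual double value(const Point<dim> &p,
                         const unsigned int /*component*/ = 0) const override
    {
      double q = p(0);
      for (unsigned int i = 1; i < dim; ++i)
        q += std::sin(10 * p(i) + 5 * p(0) * p(0));
      return std::exp(q);
    }
  };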
      @@ -1184,9 +1184,9 @@

      While we're already at watching pictures, this is the eighth grid, as viewed from top:

However, we are not yet done evaluating the point value computation. In fact, plotting the error $e=|u(x_0)-u_h(x_0)|$ for the two refinement criteria yields the following picture:

      What is disturbing about this picture is that not only is the adaptive mesh refinement not better than global refinement as one would usually expect, it is even significantly worse since its convergence is irregular, preventing all extrapolation techniques when using the values of subsequent meshes! On the other hand, global refinement provides a perfect $1/N$ or $h^{-2}$ convergence history and provides every opportunity to even improve on the point values by extrapolation. Global mesh refinement must therefore be considered superior in this example! This is even more surprising as the evaluation point is not somewhere in the left part where the mesh is coarse, but rather to the right and the adaptive refinement should refine the mesh around the evaluation point as well.

      We thus close the discussion of this example program with a question:

      What is wrong with adaptivity if it is not better than global refinement?

      /usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html 2024-11-15 06:44:28.243660982 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_14.html 2024-11-15 06:44:28.247661019 +0000 @@ -176,30 +176,30 @@

The Heidelberg group of Professor Rolf Rannacher, to which the three initial authors of the deal.II library belonged during their PhD time and partly also afterwards, has been involved with adaptivity and error estimation for finite element discretizations since the mid-1990s. The main achievement is the development of error estimates for arbitrary functionals of the solution, and of optimal mesh refinement for its computation.

      We will not discuss the derivation of these concepts in too great detail, but will implement the main ideas in the present example program. For a thorough introduction into the general idea, we refer to the seminal work of Becker and Rannacher [BR95], [BR96r], and the overview article of the same authors in Acta Numerica [BR01]; the first introduces the concept of error estimation and adaptivity for general functional output for the Laplace equation, while the second gives many examples of applications of these concepts to a large number of other, more complicated equations. For applications to individual types of equations, see also the publications by Becker [Bec95], [Bec98], Kanschat [Kan96], [FK97], Suttmeier [Sut96], [RS97], [RS98c], [RS99], Bangerth [BR99b], [Ban00w], [BR01a], [Ban02], and Hartmann [Har02], [HH01], [HH01b]. All of these works, from the original introduction by Becker and Rannacher to individual contributions to particular equations, have later been summarized in a book by Bangerth and Rannacher that covers all of these topics, see [BR03].

      The basic idea is the following: in applications, one is not usually interested in the solution per se, but rather in certain aspects of it. For example, in simulations of flow problems, one may want to know the lift or drag of a body immersed in the fluid; it is this quantity that we want to know to best accuracy, and whether the rest of the solution of the describing equations is well resolved is not of primary interest. Likewise, in elasticity one might want to know about values of the stress at certain points to guess whether maximal load values of joints are safe, for example. Or, in radiative transfer problems, mean flux intensities are of interest.

In all the cases just listed, it is the evaluation of a functional $J(u)$ of the solution which we are interested in, rather than the values of $u$ everywhere. Since the exact solution $u$ is not available, but only its numerical approximation $u_h$, it is sensible to ask whether the computed value $J(u_h)$ is within certain limits of the exact value $J(u)$, i.e. we want to bound the error with respect to this functional, $J(u)-J(u_h)$.

For simplicity of exposition, we henceforth assume that both the quantity of interest $J$ as well as the equation are linear, and we will in particular show the derivation for the Laplace equation with homogeneous Dirichlet boundary conditions, although the concept is much more general. For this general case, we refer to the references listed above. The goal is to obtain bounds on the error, $J(e)=J(u)-J(u_h)$. For this, let us denote by $z$ the solution of a dual problem, defined as follows:

\[
  a(\varphi,z) = J(\varphi) \qquad \forall \varphi,
\]

where $a(\cdot,\cdot)$ is the bilinear form associated with the differential equation, and the test functions are chosen from the corresponding solution space. Then, taking as special test function $\varphi=e$ the error, we have that

\[
  J(e) = a(e,z)
\]

      and we can, by Galerkin orthogonality, rewrite this as

\[
  J(e) = a(e,z-\varphi_h)
\]

      where $\varphi_h$ can be chosen from the discrete test space in whatever way we find convenient.

      Concretely, for Laplace's equation, the error identity reads

\[
  J(e) = (\nabla e, \nabla(z-\varphi_h)).
\]

      Because we want to use this formula not only to compute error, but also to refine the mesh, we need to rewrite the expression above as a sum over cells where each cell's contribution can then be used as an error indicator for this cell. Thus, we split the scalar products into terms for each cell, and integrate by parts on each of them:

\begin{eqnarray*}
  J(e)
  &=&
  \sum_K (\nabla (u-u_h), \nabla (z-\varphi_h))_K
  \\
  &=&
  \sum_K (-\Delta (u-u_h), z-\varphi_h)_K
  + (\partial_n (u-u_h), z-z_h)_{\partial K}.
\end{eqnarray*}

Next we use that $-\Delta u=f$, and that the solution of the Laplace equation is smooth enough that $\partial_n u$ is continuous almost everywhere – so the term involving $\partial_n u$ on one cell cancels with that on its neighbor, where the normal vector has the opposite sign. (The same is not true for $\partial_n u_h$, though.) At the boundary of the domain, where there is no neighbor cell with which this term could cancel, the weight $z-\varphi_h$ can be chosen as zero, and the whole term disappears.

      Thus, we have

\begin{eqnarray*}
  J(e)
  &=&
  \sum_K (f+\Delta u_h, z-\varphi_h)_K
  - (\partial_n u_h, z-\varphi_h)_{\partial K\backslash \partial\Omega}.
\end{eqnarray*}

In a final step, note that when taking the normal derivative of $u_h$, we mean the value of this quantity as taken from this side of the cell (for the usual Lagrange elements, derivatives are not continuous across edges). We then rewrite the above formula by exchanging half of the edge integral of cell $K$ with the neighbor cell $K'$, to obtain

\begin{eqnarray*}
  J(e)
  &=&
  \sum_K (f+\Delta u_h, z-\varphi_h)_K
  - \frac 12 (\partial_n u_h|_K + \partial_{n'} u_h|_{K'},
              z-\varphi_h)_{\partial K\backslash \partial\Omega}.
\end{eqnarray*}

Using that for the normal vectors on adjacent cells we have $n'=-n$, we define the jump of the normal derivative by

\[
  [\partial_n u_h] \dealcoloneq \partial_n u_h|_K + \partial_{n'} u_h|_{K'}
  =
  \partial_n u_h|_K - \partial_n u_h|_{K'},
\]

and get the final form after setting the discrete function $\varphi_h$, which is by now still arbitrary, to the point interpolation of the dual solution, $\varphi_h=I_h z$:

\begin{eqnarray*}
  J(e)
  &=&
  \sum_K (f+\Delta u_h, z-I_h z)_K
  - \frac 12 ([\partial_n u_h],
              z-I_h z)_{\partial K\backslash \partial\Omega}.
\end{eqnarray*}

With this, we have obtained an exact representation of the error of the finite element discretization with respect to arbitrary (linear) functionals $J(\cdot)$. Its structure is a weighted form of a residual estimator, as both $f+\Delta u_h$ and $[\partial_n u_h]$ are cell and edge residuals that vanish on the exact solution, and $z-I_h z$ are weights indicating how important the residual on a certain cell is for the evaluation of the given functional. Furthermore, it is a cell-wise quantity, so we can use it as a mesh refinement criterion. The question is: how to evaluate it? After all, the evaluation requires knowledge of the dual solution $z$, which carries the information about the quantity we want to know to best accuracy.

In some, very special cases, this dual solution is known. For example, if the functional $J(\cdot)$ is the point evaluation, $J(\varphi)=\varphi(x_0)$, then the dual solution has to satisfy

\[
  -\Delta z = \delta(x-x_0),
\]

      with the Dirac delta function on the right hand side, and the dual solution is the Green's function with respect to the point $x_0$. For simple geometries, this function is analytically known, and we could insert it into the error representation formula.

      However, we do not want to restrict ourselves to such special cases. Rather, we will compute the dual solution numerically, and approximate $z$ by some numerically obtained $\tilde z$. We note that it is not sufficient to compute this approximation $\tilde z$ using the same method as used for the primal solution $u_h$, since then $\tilde z-I_h \tilde z=0$, and the overall error estimate would be zero. Rather, the approximation $\tilde z$ has to be from a larger space than the primal finite element space. There are various ways to obtain such an approximation (see the cited literature), and we will choose to compute it with a higher order finite element space. While this is certainly not the most efficient way, it is simple since we already have all we need to do that in place, and it also allows for simple experimenting. For more efficient methods, again refer to the given literature, in particular [BR95], [BR03].
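
As a minimal illustration of that choice (the degrees are an assumption for concreteness, not the program's literal setup):

  // Primal problem in Q_p, dual problem in Q_{p+1}: with this choice
  // the difference between the dual approximation and its interpolation
  // into the primal space does not vanish identically, so the error
  // representation formula yields nonzero cell indicators.
  const unsigned int p = 1;
  const FE_Q<dim> primal_fe(p);
  const FE_Q<dim> dual_fe(p + 1);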

      With this, we end the discussion of the mathematical side of this program and turn to the actual implementation.

Note
There are two steps above that do not seem necessary if all you care about is computing the error: namely, (i) the subtraction of $\phi_h$ from $z$, and (ii) splitting the integral into a sum of cells and integrating by parts on each. Indeed, neither of these two steps changes $J(e)$ at all, as we only ever consider identities above until the substitution of $z$ by $\tilde z$. In other words, if you care only about estimating the global error $J(e)$, then these steps are not necessary. On the other hand, if you want to use the error estimate also as a refinement criterion for each cell of the mesh, then it is necessary to (i) break the estimate into a sum of cells, and (ii) massage the formulas in such a way that each cell's contributions have something to do with the local error. (While the contortions above do not change the value of the sum $J(e)$, they change the values we compute for each cell $K$.) To this end, we want to write everything in the form "residual times dual weight" where a "residual" is something that goes to zero as the approximation $u_h$ becomes better and better. For example, the quantity $\partial_n u_h$ is not a residual, since it simply converges to the (normal component of) the gradient of the exact solution. On the other hand, $[\partial_n u_h]$ is a residual because it converges to $[\partial_n u]=0$. All of the steps we have taken above in developing the final form of $J(e)$ have indeed had the goal of bringing the final formula into a form where each term converges to zero as the discrete solution $u_h$ converges to $u$. This then allows considering each cell's contribution as an "error indicator" that also converges to zero – as it should as the mesh is refined.

      The software

      The step-14 example program builds heavily on the techniques already used in the step-13 program. Its implementation of the dual weighted residual error estimator explained above is done by deriving a second class, properly called DualSolver, from the Solver base class, and having a class (WeightedResidual) that joins the two again and controls the solution of the primal and dual problem, and then uses both to compute the error indicator for mesh refinement.

The program continues the modular concept of the previous example, by implementing the dual functional that describes the quantity of interest by an abstract base class, and providing two different functionals which implement this interface. Adding a different quantity of interest is thus simple.

      @@ -2589,15 +2589,15 @@

Note the subtle interplay between resolving the corner singularities, and resolving around the point of evaluation. It will be rather difficult to generate such a mesh by hand, as this would involve judging quantitatively how much each of the four corner singularities should be resolved, and setting the weight compared to the vicinity of the evaluation point.

The program prints the point value and the estimated error in this quantity. From extrapolating it, we can guess that the exact value is somewhere close to 0.0334473, plus or minus 0.0000001 (note that we get almost 6 valid digits from only 22,000 (primal) degrees of freedom). This number cannot be obtained from the value of the functional alone, but I have used the assumption that the error estimator is mostly exact, and extrapolated the computed value plus the estimated error, to get an approximation of the true value. Computing with more degrees of freedom shows that this assumption is indeed valid.

From the computed results, we can generate two graphs: one that shows the convergence of the error $J(u)-J(u_h)$ (taking the extrapolated value as correct) in the point value, and the value that we get by adding up computed value $J(u_h)$ and estimated error $\eta$ (if the error estimator $\eta$ were exact, then the value $J(u_h)+\eta$ would equal the exact point value, and the error in this quantity would always be zero; however, since the error estimator is only a - good - approximation to the true error, we can by this only reduce the size of the error). In this graph, we also indicate the complexity ${\cal O}(1/N)$ to show that mesh refinement acts optimally in this case. The second chart compares true and estimated error, and shows that the two are actually very close to each other, even for such a complicated quantity as the point value:

      Comparing refinement criteria

Since we have accepted quite some effort when using the mesh refinement driven by the dual weighted error estimator (for solving the dual problem, and for evaluating the error representation), it is worthwhile asking whether that effort was successful. To this end, we first compare the achieved error levels for different mesh refinement criteria. To generate this data, simply change the value of the mesh refinement criterion variable in the main program. The results are thus (for the weight in the Kelly indicator, we have chosen the function $1/(r^2+0.1^2)$, where $r$ is the distance to the evaluation point; it can be shown that this is the optimal weight if we neglect the effects of boundaries):

      Checking these numbers, we see that for global refinement, the error is proportional to $O(1/(sqrt(N) log(N)))$, and for the dual estimator $O(1/N)$. Generally speaking, we see that the dual weighted error estimator is better than the other refinement indicators, at least when compared with those that have a similarly regular behavior. The Kelly indicator produces smaller errors, but jumps about the picture rather irregularly, with the error also changing signs sometimes. Therefore, its behavior does not allow to extrapolate the results to larger values of N. Furthermore, if we trust the error estimates of the dual weighted error estimator, the results can be improved by adding the estimated error to the computed values. In terms of reliability, the weighted estimator is thus better than the Kelly indicator, although the latter sometimes produces smaller errors.

      Evaluation of point stresses

      Besides evaluating the values of the solution at a certain point, the program also offers the possibility to evaluate the x-derivatives at a certain point, and also to tailor mesh refinement for this. To let the program compute these quantities, simply replace the two occurrences of PointValueEvaluation in the main function by PointXDerivativeEvaluation, and let the program run:

      Refinement cycle: 0
      Number of degrees of freedom: 72

      Note the asymmetry of the grids compared with those we obtained for the point evaluation. This is due to the fact that the domain and the primal solution may be symmetric about the diagonal, but the $x$-derivative is not, and the latter enters the refinement criterion.

Then, it is interesting to compare actually computed values of the quantity of interest (i.e., the $x$-derivative of the solution at one point) with a reference value of $-0.0528223\ldots$, plus or minus 0.0000005. We get this reference value by computing on a finer grid after some more mesh refinements, with approximately 130,000 cells. Recall that if the error is ${\cal O}(1/N)$ in the optimal case, then taking a mesh with ten times more cells gives us one additional digit in the result.

      In the left part of the following chart, you again see the convergence of the error towards this extrapolated value, while on the right you see a comparison of true and estimated error:

After an initial phase where the true error changes its sign, the estimated error matches it quite well, again. Also note the dramatic improvement in the error when using the estimated error to correct the computed value of $J(u_h)$.

      step-13 revisited

If instead of the Exercise_2_3 data set we choose CurvedRidges in the main function, and choose $(0.5,0.5)$ as the evaluation point, then we can redo the computations of the previous example program to check whether the results obtained with the help of the dual weighted error estimator are better than those we had previously.

      First, the meshes after 9 adaptive refinement cycles obtained with the point evaluation and derivative evaluation refinement criteria, respectively, look like this:

      /usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html 2024-11-15 06:44:28.303661518 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_15.html 2024-11-15 06:44:28.315661626 +0000 @@ -156,41 +156,41 @@

      Introduction

      Foreword

This program deals with an example of a non-linear elliptic partial differential equation, the minimal surface equation. You can imagine the solution of this equation to describe the surface spanned by a soap film that is enclosed by a closed wire loop. We imagine the wire to not just be a planar loop, but in fact curved. The surface tension of the soap film will then reduce the surface area to a minimum. The solution of the minimal surface equation describes this shape with the wire's vertical displacement as a boundary condition. For simplicity, we will here assume that the surface can be written as a graph $u=u(x,y)$, although it is clear that it is not very hard to construct cases where the wire is bent in such a way that the surface can only locally be constructed as a graph, but not globally.

      Because the equation is non-linear, we can't solve it directly. Rather, we have to use Newton's method to compute the solution iteratively.

      Note
The material presented here is also discussed in video lecture 31.5, video lecture 31.55, video lecture 31.6. (See also video lecture 31.65, video lecture 31.7.)

      Classical formulation

      In a classical sense, the problem is given in the following form:

\begin{align*}
  -\nabla \cdot \left( \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right) &= 0 \qquad
  \qquad &&\textrm{in} ~ \Omega
  \\
  u&=g \qquad\qquad &&\textrm{on} ~ \partial \Omega.
\end{align*}

$\Omega$ is the domain we get by projecting the wire's positions into $x$-$y$ space. In this example, we choose $\Omega$ as the unit disk.

As described above, we solve this equation using Newton's method in which we compute the $n$th approximate solution from the $(n-1)$th one, and use a damping parameter $\alpha^n$ to get better global convergence behavior:

\begin{align*}
  F'(u^{n},\delta u^{n})&=- F(u^{n})
  \\
  u^{n+1}&=u^{n}+\alpha^n \delta u^{n}
\end{align*}

      with

\[
  F(u) \dealcoloneq -\nabla \cdot \left( \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right)
\]

and $F'(u,\delta u)$ is the derivative of $F$ in the direction of $\delta u$:

\[
  F'(u,\delta u)=\lim \limits_{\epsilon \rightarrow 0}{\frac{F(u+\epsilon \delta u)-
  F(u)}{\epsilon}}.
\]
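To see the iteration in its simplest possible setting, here is a short, self-contained C++ sketch that applies exactly this damped update to a scalar model problem $F(u)=u^3-2=0$ (an arbitrary stand-in chosen purely for illustration; in the program, the division by $F'$ becomes the finite element solve discussed below):

#include <cmath>
#include <cstdio>

int main()
{
  // Scalar stand-ins for F(u) and its derivative F'(u).
  auto F  = [](double u) { return u * u * u - 2.; };
  auto Fp = [](double u) { return 3. * u * u; };

  double       u     = 1.;  // initial guess u^0
  const double alpha = 0.1; // fixed damping parameter, as used in step-15

  for (unsigned int n = 0; n < 500; ++n)
    {
      const double delta_u = -F(u) / Fp(u); // solve F'(u^n) du^n = -F(u^n)
      u += alpha * delta_u;                 // u^{n+1} = u^n + alpha^n du^n
      if (std::abs(F(u)) < 1e-12)
        break;
    }
  std::printf("u = %.12f\n", u); // approaches 2^(1/3) = 1.259921...
}

Note how the fixed damping makes the iteration converge only linearly; this is exactly the price discussed in the section on step lengths below.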

Going through the motions to find out what $F'(u,\delta u)$ is, we find that we have to solve a linear elliptic PDE in every Newton step, with $\delta u^n$ as the solution of:

\[
  - \nabla \cdot \left( \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}}\nabla
  \delta u^{n} \right) +
  \nabla \cdot \left( \frac{\nabla u^{n} \cdot
  \nabla \delta u^{n}}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{3}{2}}} \nabla u^{n}
  \right)  =
  -\left( - \nabla \cdot \left( \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}}
  \nabla u^{n} \right) \right)
\]

In order to solve the minimal surface equation, we have to solve this equation repeatedly, once per Newton step. To solve this, we have to take a look at the boundary condition of this problem. Assuming that $u^{n}$ already has the right boundary values, the Newton update $\delta u^{n}$ should have zero boundary conditions, in order to have the right boundary condition after adding both. Since in the first Newton step we start with the solution $u^{0}\equiv 0$, the Newton update still has to deliver the right boundary condition to the solution $u^{1}$.

Summing up, we have to solve the PDE above with the boundary condition $\delta u^{0}=g$ in the first step and with $\delta u^{n}=0$ in all the following steps.

Note
In some sense, one may argue that if the program already implements $F(u)$, it is duplicative to also have to implement $F'(u,\delta u)$. As always, duplication tempts bugs and we would like to avoid it. While we do not explore this issue in this program, we will come back to it at the end of the Possibilities for extensions section below, and specifically in step-72.

      Weak formulation of the problem

Starting with the strong formulation above, we get the weak formulation by multiplying both sides of the PDE with a test function $\varphi$ and integrating by parts on both sides:

\[
  \left( \nabla \varphi , \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}}\nabla
  \delta u^{n} \right)-\left(\nabla \varphi ,\frac{\nabla u^{n} \cdot \nabla
  \delta u^{n}}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{3}{2}}}\nabla u^{n}  \right)
  = -\left(\nabla \varphi , \frac{1}{\left(1+|\nabla u^{n}|^{2}\right)^{\frac{1}{2}}} \nabla u^{n}
   \right).
\]

Here the solution $\delta u^{n}$ is a function in $H^{1}(\Omega)$, subject to the boundary conditions discussed above. Reducing this space to a finite dimensional space with basis $\left\{
\varphi_{0},\dots , \varphi_{N-1}\right\}$, we can write the solution:

\[
  \delta u^{n}=\sum_{j=0}^{N-1} \delta U_{j} \varphi_{j}.
\]

Using the basis functions as test functions and defining $a_{n} \dealcoloneq \frac{1}
{\sqrt{1+|\nabla u^{n}|^{2}}}$, we can rewrite the weak formulation:

\[
  \sum_{j=0}^{N-1}\left[ \left( \nabla \varphi_{i} , a_{n} \nabla \varphi_{j} \right) -
  \left(\nabla u^{n}\cdot \nabla \varphi_{i} , a_{n}^{3} \nabla u^{n} \cdot \nabla
  \varphi_{j} \right) \right] \cdot \delta U_{j}=-\left( \nabla \varphi_{i} , a_{n}
  \nabla u^{n}\right) \qquad \forall i=0,\dots ,N-1,
\]

where the solution $\delta u^{n}$ is given by the coefficients $\delta U^{n}_{j}$. This linear system of equations can be rewritten as:

\[
  A^{n}\; \delta U^{n}=b^{n},
\]

where the entries of the matrix $A^{n}$ are given by:

\[
  A^{n}_{ij} \dealcoloneq \left( \nabla \varphi_{i} , a_{n} \nabla \varphi_{j} \right) -
  \left(\nabla u^{n}\cdot \nabla \varphi_{i} , a_{n}^{3} \nabla u^{n} \cdot \nabla
  \varphi_{j} \right),
\]

and the right hand side $b^{n}$ is given by:

\[
  b^{n}_{i} \dealcoloneq -\left( \nabla \varphi_{i} , a_{n} \nabla u^{n}\right).
\]
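To make the structure of these formulas concrete, here is a small, self-contained C++ sketch that assembles $A^n$ and $b^n$ for the one-dimensional analogue of this problem, using piecewise linear elements on a uniform grid. It illustrates the loop structure only and is not the tutorial's deal.II code; in 1d the resulting system is of course trivial:

#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
  // One Newton step for the 1d analogue: piecewise linear elements on [0,1]
  // with a uniform grid; u holds the nodal values of u^n.
  const unsigned int  n_cells = 8;
  const double        h       = 1. / n_cells;
  std::vector<double> u(n_cells + 1);
  for (unsigned int i = 0; i <= n_cells; ++i)
    u[i] = (i * h) * (1. - i * h); // some nonzero iterate, for illustration

  // A^n is tridiagonal in 1d: store diagonal and off-diagonal entries.
  std::vector<double> diag(n_cells + 1, 0.), off(n_cells, 0.), b(n_cells + 1, 0.);

  for (unsigned int K = 0; K < n_cells; ++K)
    {
      const double up  = (u[K + 1] - u[K]) / h;        // (u^n)' on cell K
      const double a_n = 1. / std::sqrt(1. + up * up); // the coefficient a_n

      // On cell K, phi_K' = -1/h and phi_{K+1}' = +1/h, so the cell integral
      // of A_ij equals (a_n - a_n^3 (u')^2) * phi_i' * phi_j' * h:
      const double c = (a_n - a_n * a_n * a_n * up * up) / h;
      diag[K] += c;
      diag[K + 1] += c;
      off[K] -= c;

      // b_i = -(phi_i', a_n (u^n)') on cell K:
      b[K] += a_n * up;     // = -(-1/h) * a_n * u' * h
      b[K + 1] -= a_n * up; // = -(+1/h) * a_n * u' * h
    }

  // Boundary condition delta u^n = 0 at both ends (Newton steps n >= 1):
  diag[0] = diag[n_cells] = 1.;
  off[0] = off[n_cells - 1] = 0.;
  b[0] = b[n_cells] = 0.;

  // The tridiagonal system A delta U = b would now go to a linear solver
  // (in the program: preconditioned CG, see the discussion below).
  std::printf("A_11 = %g, b_1 = %g\n", diag[1], b[1]);
}

Note how, on each cell, the two terms of $A^n_{ij}$ combine into $(a_n - a_n^3 (u')^2)\,\varphi_i'\varphi_j' = a_n^3\,\varphi_i'\varphi_j'$, the 1d counterpart of the matrix $B$ discussed in the next section.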

      Questions about the appropriate solver

      The matrix that corresponds to the Newton step above can be reformulated to show its structure a bit better. Rewriting it slightly, we get that it has the form

\[
  A_{ij}
  =
  \left(
    \nabla \varphi_i,
    B
    \nabla \varphi_j
  \right),
\]

where the matrix $B$ (of size $d \times d$ in $d$ space dimensions) is given by the following expression:

\[
  B
  =
  a_n \left\{
  \mathbf 1 -
  \frac{\nabla u_n}{\sqrt{1+|\nabla u^{n}|^{2}}} \otimes
  \frac{\nabla u_n}{\sqrt{1+|\nabla u^{n}|^{2}}}
  \right\}.
\]

From this expression, it is obvious that $B$ is symmetric, and so $A$ is symmetric as well. On the other hand, $B$ is also positive definite, which confers the same property onto $A$. This can be seen by noting that the vector $v_1 =
\frac{\nabla u^n}{|\nabla u^n|}$ is an eigenvector of $B$ with eigenvalue $\lambda_1=a_n \left(1-\frac{|\nabla u^n|^2}{1+|\nabla u^n|^2}\right) > 0$, while all vectors $v_2\ldots v_d$ that are perpendicular to $v_1$ and each other are eigenvectors with eigenvalue $a_n$. Since all eigenvalues are positive, $B$ is positive definite and so is $A$. We can thus use the CG method for solving the Newton steps. (The fact that the matrix $A$ is symmetric and positive definite should not come as a surprise. It results from taking the derivative of an operator that results from taking the derivative of an energy functional: the minimal surface equation simply minimizes some non-quadratic energy. Consequently, the Newton matrix, as the matrix of second derivatives of a scalar energy, must be symmetric since the derivative with regard to the $i$th and $j$th degree of freedom should clearly commute. Likewise, if the energy functional is convex, then the matrix of second derivatives must be positive definite, and the direct calculation above simply reaffirms this.)

It is worth noting, however, that the positive definiteness degenerates for problems where $\nabla u$ becomes large. In other words, if we simply multiply all boundary values by 2, then to first order $u$ and $\nabla u$ will also be multiplied by two, but as a consequence the smallest eigenvalue of $B$ will become smaller and the matrix will become more ill-conditioned. (More specifically, for $|\nabla u^n|\rightarrow\infty$ we have that $\lambda_1 \propto a_n \frac{1}{|\nabla u^n|^2}$ whereas $\lambda_2\ldots \lambda_d=a_n$; thus, the condition number of $B$, which is a multiplicative factor in the condition number of $A$, grows like ${\cal O}(|\nabla u^n|^2)$.) It is simple to verify with the current program that indeed multiplying the boundary values used in the current program by larger and larger values results in a problem that will ultimately no longer be solvable using the simple preconditioned CG method we use here.

      Choice of step length and globalization

As stated above, Newton's method works by computing a direction $\delta u^n$ and then performing the update $u^{n+1} = u^{n}+\alpha^n
\delta u^{n}$ with a step length $0 < \alpha^n \le 1$. It is a common observation that for strongly nonlinear models, Newton's method does not converge if we always choose $\alpha^n=1$ unless one starts with an initial guess $u^0$ that is sufficiently close to the solution $u$ of the nonlinear problem. In practice, we don't always have such an initial guess, and consequently taking full Newton steps (i.e., using $\alpha=1$) frequently does not work.

A common strategy therefore is to use a smaller step length for the first few steps while the iterate $u^n$ is still far away from the solution $u$, and as we get closer use larger values for $\alpha^n$ until we can finally start to use full steps $\alpha^n=1$ as we are close enough to the solution. The question is of course how to choose $\alpha^n$. There are basically two widely used approaches: line search and trust region methods.

In this program, we simply always choose the step length equal to 0.1. This makes sure that for the testcase at hand we do get convergence, although it is clear that by not eventually reverting to full step lengths we forego the rapid, quadratic convergence that makes Newton's method so appealing. Obviously, this is a point one would eventually have to address if the program were made into one that is meant to solve more realistic problems. We will comment on this issue some more in the results section, and use an even better approach in step-77.
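To illustrate the line search idea in its simplest form, the following self-contained C++ sketch augments the scalar Newton example from above with a backtracking rule, halving $\alpha$ until the residual decreases. This particular acceptance test is an illustrative assumption, not the strategy step-77 actually uses:

#include <cmath>
#include <cstdio>

int main()
{
  auto F  = [](double u) { return u * u * u - 2.; };
  auto Fp = [](double u) { return 3. * u * u; };

  double u = 4.; // deliberately far from the root
  for (unsigned int n = 0; n < 50 && std::abs(F(u)) > 1e-12; ++n)
    {
      const double delta_u = -F(u) / Fp(u); // Newton direction

      // Backtracking: start from a full step and halve alpha until the
      // residual decreases (a crude but typical sufficient-decrease test).
      double alpha = 1.;
      while (std::abs(F(u + alpha * delta_u)) >= std::abs(F(u)) && alpha > 1e-4)
        alpha /= 2.;

      u += alpha * delta_u;
      std::printf("n=%u  alpha=%.3g  |F(u)|=%.3g\n", n, alpha, std::abs(F(u)));
    }
}

Because this model problem is so benign, the full step is accepted almost immediately and one recovers quadratic convergence; the backtracking only matters for genuinely hard nonlinearities.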

      Summary of the algorithm and testcase

      Overall, the program we have here is not unlike step-6 in many regards. The layout of the main class is essentially the same. On the other hand, the driving algorithm in the run() function is different and works as follows:

1. Start with the function $u^{0}\equiv 0$ and modify it in such a way that the values of $u^0$ along the boundary equal the correct boundary values $g$ (this happens in the call to AffineConstraints::distribute()). Set $n=0$.

2. Compute the Newton update by solving the system $A^{n}\;\delta U^{n}=b^{n}$ with boundary condition $\delta u^{n}=0$ on $\partial \Omega$.

3. Compute a step length $\alpha^n$. In this program, we always set $\alpha^n=0.1$. To make things easier to extend later on, this happens in a function of its own, namely in MinimalSurfaceProblem::determine_step_length. (The strategy of always choosing $\alpha^n=0.1$ is of course not optimal – we should choose a step length that works for a given search direction – but it requires a bit of work to do that. In the end, we leave these sorts of things to external packages: step-77 does that.)

        /usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html 2024-11-15 06:44:28.359662019 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_16.html 2024-11-15 06:44:28.359662019 +0000 @@ -153,7 +153,7 @@
The fine level in this mesh consists only of the degrees of freedom that are defined on the refined cells, but does not extend to that part of the domain that is not refined. While this guarantees that the overall effort grows as ${\cal O}(N)$ as necessary for optimal multigrid complexity, it leads to problems when defining where to smooth and what boundary conditions to pose for the operators defined on individual levels if the level boundary is not an external boundary. These questions are discussed in detail in the article cited above.

        The testcase

        The problem we solve here is similar to step-6, with two main differences: first, the multigrid preconditioner, obviously. We also change the discontinuity of the coefficients such that the local assembler does not look more complicated than necessary.

        The commented program

        /usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html 2024-11-15 06:44:28.439662733 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_18.html 2024-11-15 06:44:28.443662769 +0000 @@ -167,23 +167,23 @@

        Quasistatic elastic deformation

        Motivation of the model

        In general, time-dependent small elastic deformations are described by the elastic wave equation

\[
  \rho \frac{\partial^2 \mathbf{u}}{\partial t^2}
  + c \frac{\partial \mathbf{u}}{\partial t}
  - \textrm{div}\  ( C \varepsilon(\mathbf{u})) = \mathbf{f}
  \qquad
  \textrm{in}\ \Omega,
\]

where $\mathbf{u}=\mathbf{u} (\mathbf{x},t)$ is the deformation of the body, $\rho$ and $c$ the density and attenuation coefficient, and $\mathbf{f}$ external forces. In addition, initial conditions

\[
  \mathbf{u}(\cdot, 0) = \mathbf{u}_0(\cdot)
  \qquad
  \textrm{on}\ \Omega,
\]

        and Dirichlet (displacement) or Neumann (traction) boundary conditions need to be specified for a unique solution:

\begin{eqnarray*}
  \mathbf{u}(\mathbf{x},t) &=& \mathbf{d}(\mathbf{x},t)
  \qquad
  \textrm{on}\ \Gamma_D\subset\partial\Omega,
  \\
  \mathbf{n} \ C \varepsilon(\mathbf{u}(\mathbf{x},t)) &=& \mathbf{b}(\mathbf{x},t)
  \qquad
  \textrm{on}\ \Gamma_N=\partial\Omega\backslash\Gamma_D.
\end{eqnarray*}

In the above formulation, $\varepsilon(\mathbf{u})= \frac 12 (\nabla \mathbf{u} + \nabla
\mathbf{u}^T)$ is the symmetric gradient of the displacement, also called the strain. $C$ is a tensor of rank 4, called the stress-strain tensor (the inverse of the compliance tensor) that contains knowledge of the elastic strength of the material; its symmetry properties make sure that it maps symmetric tensors of rank 2 (“matrices” of dimension $d$, where $d$ is the spatial dimensionality) onto symmetric tensors of the same rank. We will comment on the roles of the strain and stress tensors more below. For the moment it suffices to say that we interpret the term $\textrm{div}\  ( C \varepsilon(\mathbf{u}))$ as the vector with components $\frac \partial{\partial x_j} C_{ijkl} \varepsilon(\mathbf{u})_{kl}$, where summation over indices $j,k,l$ is implied.

The quasistatic limit of this equation is motivated as follows: each small perturbation of the body, for example by changes in boundary condition or the forcing function, will result in a corresponding change in the configuration of the body. In general, this will be in the form of waves radiating away from the location of the disturbance. Due to the presence of the damping term, these waves will be attenuated on a time scale of, say, $\tau$. Now, assume that all changes in external forcing happen on time scales that are much larger than $\tau$. In that case, the dynamic nature of the change is unimportant: we can consider the body to always be in static equilibrium, i.e. we can assume that at all times the body satisfies

\begin{eqnarray*}
  - \textrm{div}\  ( C \varepsilon(\mathbf{u})) &=& \mathbf{f}(\mathbf{x},t)
  \qquad
  \textrm{in}\ \Omega,
  \\
  \mathbf{u}(\mathbf{x},t) &=& \mathbf{d}(\mathbf{x},t)
  \qquad
  \textrm{on}\ \Gamma_D,
  \\
  \mathbf{n} \ C \varepsilon(\mathbf{u}(\mathbf{x},t)) &=& \mathbf{b}(\mathbf{x},t)
  \qquad
  \textrm{on}\ \Gamma_N.
\end{eqnarray*}

Note that the differential equation does not contain any time derivatives any more – all time dependence is introduced through boundary conditions and a possibly time-varying force function $\mathbf{f}(\mathbf{x},t)$. The changes in configuration can therefore be considered as being stationary instantaneously. An alternative view of this is that $t$ is not really a time variable, but only a time-like parameter that governs the evolution of the problem.

        While these equations are sufficient to describe small deformations, computing large deformations is a little more complicated and, in general, leads to nonlinear equations such as those treated in step-44. In the following, let us consider some of the tools one would employ when simulating problems in which the deformation becomes large.

        Note
        The model we will consider below is not founded on anything that would be mathematically sound: we will consider a model in which we produce a small deformation, deform the physical coordinates of the body by this deformation, and then consider the next loading step again as a linear problem. This isn't consistent, since the assumption of linearity implies that deformations are infinitesimal and so moving around the vertices of our mesh by a finite amount before solving the next linear problem is an inconsistent approach. We should therefore note that it is not surprising that the equations discussed below can't be found in the literature: The model considered here has little to do with reality! On the other hand, the implementation techniques we consider are very much what one would need to use when implementing a real model, as we will see in step-44.
To come back to defining our "artificial" model, let us first introduce a tensorial stress variable $\sigma$, and write the differential equations in terms of the stress:

\begin{eqnarray*}
  - \textrm{div}\  \sigma &=& \mathbf{f}(\mathbf{x},t)
  \qquad
  \textrm{in}\ \Omega(t),
  \\
  \mathbf{u}(\mathbf{x},t) &=& \mathbf{d}(\mathbf{x},t)
  \qquad
  \textrm{on}\ \Gamma_D\subset\partial\Omega(t),
  \\
  \mathbf{n} \ C \varepsilon(\mathbf{u}(\mathbf{x},t)) &=& \mathbf{b}(\mathbf{x},t)
  \qquad
  \textrm{on}\ \Gamma_N=\partial\Omega(t)\backslash\Gamma_D.
\end{eqnarray*}

Note that these equations are posed on a domain $\Omega(t)$ that changes with time, with the boundary moving according to the displacements $\mathbf{u}(\mathbf{x},t)$ of the points on the boundary. To complete this system, we have to specify the incremental relationship between the stress and the strain, as follows:

\[
  \dot\sigma = C \varepsilon (\dot{\mathbf{u}}),
  \qquad
  \qquad
  \textrm{[stress-strain]}
\]

where a dot indicates a time derivative. Both the stress $\sigma$ and the strain $\varepsilon(\mathbf{u})$ are symmetric tensors of rank 2.

        Time discretization

Numerically, this system is solved as follows: first, we discretize the time component using a backward Euler scheme. This leads to a discrete equilibrium of force at time step $n$:

\[
  -\textrm{div}\  \sigma^n = f^n,
\]

        where

\[
  \sigma^n = \sigma^{n-1} + C \varepsilon (\Delta \mathbf{u}^n),
\]

and $\Delta \mathbf{u}^n$ the incremental displacement for time step $n$. In addition, we have to specify initial data $\mathbf{u}(\cdot,0)=\mathbf{u}_0$. This way, if we want to solve for the displacement increment, we have to solve the following system:

\begin{align*}
  - \textrm{div}\   C \varepsilon(\Delta\mathbf{u}^n) &= \mathbf{f} + \textrm{div}\  \sigma^{n-1}
  \qquad
  &&\textrm{in}\ \Omega(t_{n-1}),
  \\
  \Delta \mathbf{u}^n(\mathbf{x},t) &= \mathbf{d}(\mathbf{x},t_n) - \mathbf{d}(\mathbf{x},t_{n-1})
  \qquad
  &&\textrm{on}\ \Gamma_D,
  \\
  \mathbf{n} \ C \varepsilon(\Delta \mathbf{u}^n(\mathbf{x},t)) &= \mathbf{b}(\mathbf{x},t_n)-\mathbf{b}(\mathbf{x},t_{n-1})
  \qquad
  &&\textrm{on}\ \Gamma_N=\partial\Omega(t_{n-1})\backslash\Gamma_D.
\end{align*}

The weak form of this set of equations, which as usual is the basis for the finite element formulation, reads as follows: find $\Delta \mathbf{u}^n \in
\{v\in H^1(\Omega(t_{n-1}))^d: v|_{\Gamma_D}=\mathbf{d}(\cdot,t_n) - \mathbf{d}(\cdot,t_{n-1})\}$ such that

\begin{align*}
  (C \varepsilon(\Delta\mathbf{u}^n), \varepsilon(\varphi) )_{\Omega(t_{n-1})}
  &=
  (\mathbf{f}, \varphi)_{\Omega(t_{n-1})}
  -(\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}
  \\
  &\qquad
  +(\mathbf{b}(\mathbf{x},t_n)-\mathbf{b}(\mathbf{x},t_{n-1}), \varphi)_{\Gamma_N}
  +(\sigma^{n-1}\mathbf{n}, \varphi)_{\Gamma_N}
  \\
  &\qquad\qquad
  \forall \varphi \in \{\mathbf{v}\in H^1(\Omega(t_{n-1}))^d: \mathbf{v}|_{\Gamma_D}=0\}.
\end{align*}

Using that $\sigma^{n-1} \mathbf{n}
            = [C \varepsilon(\mathbf{u}^{n-1})] \mathbf{n}
            = \mathbf{b}(\mathbf x, t_{n-1})$, these equations can be simplified to

\begin{align*}
  (C \varepsilon(\Delta\mathbf{u}^n), \varepsilon(\varphi) )_{\Omega(t_{n-1})}
  &=
  (\mathbf{f}, \varphi)_{\Omega(t_{n-1})}
  -(\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}
  +(\mathbf{b}(\mathbf{x},t_n), \varphi)_{\Gamma_N}
  \\
  &\qquad\qquad
  \forall \varphi \in \{\mathbf{v}\in H^1(\Omega(t_{n-1}))^d: \mathbf{v}|_{\Gamma_D}=0\}.
  \qquad
  \qquad
  \textrm{[linear-system]}
\end{align*}

We note that, for simplicity, in the program we will always assume that there are no boundary forces, i.e. $\mathbf{b} = 0$, and that the deformation of the body is driven by body forces $\mathbf{f}$ and prescribed boundary displacements $\mathbf{d}$ alone. It is also worth noting that when integrating by parts, we would get terms of the form $(C \varepsilon(\Delta\mathbf{u}^n), \nabla \varphi
)_{\Omega(t_{n-1})}$, but that we replace them with the term involving the symmetric gradient $\varepsilon(\varphi)$ instead of $\nabla\varphi$. Due to the symmetry of $C$, the two terms are mathematically equivalent, but the symmetric version avoids the potential for round-off errors making the resulting matrix slightly non-symmetric.

The system at time step $n$, to be solved on the old domain $\Omega(t_{n-1})$, has exactly the form of a stationary elastic problem, and is therefore similar to what we have already implemented in previous example programs. We will therefore not comment on the space discretization beyond saying that we again use lowest order continuous finite elements.

        There are differences, however:

        1. We have to move (update) the mesh after each time step, in order to be able to solve the next time step on a new domain;

2. We need to know $\sigma^{n-1}$ to compute the next incremental displacement, i.e. we need to compute it at the end of the time step to make sure it is available for the next time step. Essentially, the stress variable is our window to the history of deformation of the body.

        These two operations are done in the functions move_mesh and update_quadrature_point_history in the program. While moving the mesh is only a technicality, updating the stress is a little more complicated and will be discussed in the next section.

        Updating the stress variable

As indicated above, we need to have the stress variable $\sigma^n$ available when computing time step $n+1$, and we can compute it using

\[
  \sigma^n = \sigma^{n-1} + C \varepsilon (\Delta \mathbf{u}^n).
  \qquad
  \qquad
  \textrm{[stress-update]}
\]

There are, despite the apparent simplicity of this equation, two questions that we need to discuss. The first concerns the way we store $\sigma^n$: even if we compute the incremental updates $\Delta\mathbf{u}^n$ using lowest-order finite elements, then its symmetric gradient $\varepsilon(\Delta\mathbf{u}^n)$ is in general still a function that is not easy to describe. In particular, it is not a piecewise constant function, and on general meshes (with cells that are not rectangles parallel to the coordinate axes) or with non-constant stress-strain tensors $C$ it is not even a bi- or trilinear function. Thus, it is a priori not clear how to store $\sigma^n$ in a computer program.

To decide this, we have to see where it is used. The only place where we require the stress is in the term $(\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}$. In practice, we of course replace this term by numerical quadrature:

\[
  (\sigma^{n-1},\varepsilon(\varphi))_{\Omega(t_{n-1})}
  =
  \sum_{K\subset {T}}
  \int_K \sigma^{n-1} : \varepsilon(\varphi)\; dx
  \approx
  \sum_{K\subset {T}}
  \sum_q
  w_q \ \sigma^{n-1}(\mathbf{x}_q) : \varepsilon(\varphi(\mathbf{x}_q)),
\]

where $w_q$ are the quadrature weights and $\mathbf{x}_q$ the quadrature points on cell $K$. This should make clear that what we really need is not the stress $\sigma^{n-1}$ in itself, but only the values of the stress in the quadrature points on all cells. This, however, is a simpler task: we only have to provide a data structure that is able to hold one symmetric tensor of rank 2 for each quadrature point on all cells (or, since we compute in parallel, all quadrature points of all cells that the present MPI process “owns”). At the end of each time step we then only have to evaluate $\varepsilon(\Delta \mathbf{u}^n(\mathbf{x}_q))$, multiply it by the stress-strain tensor $C$, and use the result to update the stress $\sigma^n(\mathbf{x}_q)$ at quadrature point $q$.

The second complication is not visible in our notation as chosen above. It is due to the fact that we compute $\Delta u^n$ on the domain $\Omega(t_{n-1})$, and then use this displacement increment to both update the stress as well as move the mesh nodes around to get to $\Omega(t_n)$ on which the next increment is computed. What we have to make sure, in this context, is that moving the mesh does not only involve moving around the nodes, but also making corresponding changes to the stress variable: the updated stress is a variable that is defined with respect to the coordinate system of the material in the old domain, and has to be transferred to the new domain. The reason for this can be understood as follows: locally, the incremental deformation $\Delta\mathbf{u}$ can be decomposed into three parts, a linear translation (the constant part of the displacement increment field in the neighborhood of a point), a dilational component (that part of the gradient of the displacement field that has a nonzero divergence), and a rotation. A linear translation of the material does not affect the stresses that are frozen into it – the stress values are simply translated along. The dilational or compressional change produces a corresponding stress update. However, the rotational component does not necessarily induce a nonzero stress update (think, in 2d, for example of the situation where $\Delta\mathbf{u}=(y, -x)^T$, with which $\varepsilon(\Delta
\mathbf{u})=0$). Nevertheless, if the material was prestressed in a certain direction, then this direction will be rotated along with the material. To this end, we have to define a rotation matrix $R(\Delta \mathbf{u}^n)$ that describes, at each point, the rotation due to the displacement increments. It is not hard to see that the actual dependence of $R$ on $\Delta \mathbf{u}^n$ can only be through the curl of the displacement, rather than the displacement itself or its full gradient (as mentioned above, the constant components of the increment describe translations, its divergence the dilational modes, and the curl the rotational modes). Since the exact form of $R$ is cumbersome, we only state it in the program code, and note that the correct updating procedure for the stress variable is then to apply the incremental update [stress-update] and rotate the result along with the material using $R$.
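The storage and update scheme just described is easy to sketch in code. The following self-contained C++ fragment is an illustration, not step-18's actual PointHistory class: it is restricted to 2d, stores the symmetric stress tensor as its three independent components, and uses scalar stand-ins both for the application of $C$ and for the rotation angle (which in the program would come from the curl of the incremental displacement):

#include <array>
#include <cmath>
#include <vector>

// One record per quadrature point: the 2d symmetric stress tensor, stored
// as (sigma_xx, sigma_yy, sigma_xy).
struct PointHistory
{
  std::array<double, 3> stress{{0., 0., 0.}};
};

// Rotate a symmetric 2d tensor: returns R sigma R^T for a rotation by the
// angle theta (the standard transformation of a rank-2 tensor under rotation).
std::array<double, 3> rotate(const std::array<double, 3> &s, const double theta)
{
  const double c = std::cos(theta), si = std::sin(theta);
  return {{c * c * s[0] - 2. * c * si * s[2] + si * si * s[1],
           si * si * s[0] + 2. * c * si * s[2] + c * c * s[1],
           c * si * (s[0] - s[1]) + (c * c - si * si) * s[2]}};
}

int main()
{
  // Say we have one cell with four quadrature points:
  std::vector<PointHistory> history(4);

  for (PointHistory &ph : history)
    {
      // Incremental update sigma^n = sigma^{n-1} + C eps(Delta u^n); both the
      // strain increment and the (scalar) elasticity factor are stand-ins.
      const std::array<double, 3> strain_increment{{1e-3, -1e-3, 5e-4}};
      for (unsigned int k = 0; k < 3; ++k)
        ph.stress[k] += 2.0 * strain_increment[k];

      // ...then rotate the stored stress along with the material:
      ph.stress = rotate(ph.stress, /*theta=*/1e-2);
    }
}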

        /usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html 2024-11-15 06:44:28.511663377 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_19.html 2024-11-15 06:44:28.511663377 +0000 @@ -161,135 +161,135 @@

        The finite element method in general, and deal.II in particular, were invented to solve partial differential equations – in other words, to solve continuum mechanics problems. On the other hand, sometimes one wants to solve problems in which it is useful to track individual objects ("particles") and how their positions evolve. If this simply leads to a set of ordinary differential equations, for example if you want to track the positions of the planets in the solar system over time, then deal.II is clearly not the right tool. On the other hand, if this evolution is due to the interaction with the solution of partial differential equations, or if having a mesh to determine which particles interact with others (such as in the smoothed particle hydrodynamics (SPH) method), then deal.II has support for you.

        The case we will consider here is how electrically charged particles move through an electric field. As motivation, we will consider cathode rays: Electrons emitted by a heated piece of metal that is negatively charged (the "cathode"), and that are then accelerated by an electric field towards the positively charged electrode (the "anode"). The anode is typically ring-shaped so that the majority of electrons can fly through the hole in the form of an electron beam. In the olden times, they might then have illuminated the screen of a TV built from a cathode ray tube. Today, instead, electron beams are useful in X-ray machines, electron beam lithography, electron beam welding, and a number of other areas.

The equations we will then consider are as follows: First, we need to describe the electric field. This is most easily accomplished by noting that the electric potential $V$ satisfies the equation

\[
  -\epsilon_0 \Delta V = \rho
\]

where $\epsilon_0$ is the dielectric constant of vacuum, and $\rho$ is the charge density. This is augmented by boundary conditions that we will choose as follows:

\begin{align*}
  V &= -V_0 && \text{on}\; \Gamma_\text{cathode}\subset\partial\Omega \\
  V &= +V_0 && \text{on}\; \Gamma_\text{anode}\subset\partial\Omega \\
  \epsilon\frac{\partial V}{\partial n} &= 0
   && \text{on}\; \partial\Omega\setminus\Gamma_\text{cathode}\setminus\Gamma_\text{anode}.
\end{align*}

In other words, we prescribe voltages $+V_0$ and $-V_0$ at the two electrodes and insulating (Neumann) boundary conditions elsewhere. Since the dynamics of the particles are purely due to the electric field $\mathbf E=\nabla V$, we could as well have prescribed $2V_0$ and $0$ at the two electrodes – all that matters is the voltage difference at the two electrodes.

Given this electric potential $V$ and the electric field $\mathbf E=\nabla V$, we can describe the trajectory of the $i$th particle using the differential equation

\[
  m {\ddot {\mathbf x}}_i = e\mathbf E,
\]

where $m,e$ are the mass and electric charge of each particle. In practice, it is convenient to write this as a system of first-order differential equations in the position $\mathbf x$ and velocity $\mathbf v$:

\begin{align*}
  {\dot {\mathbf v}}_i &= \frac{e\mathbf E}{m}, \\
  {\dot {\mathbf x}}_i &= {\mathbf v}_i.
\end{align*}

The deal.II class we will use to deal with particles, Particles::ParticleHandler, stores particles in a way so that the position $\mathbf x_i$ is part of the Particles::ParticleHandler data structures. (It stores particles sorted by cell they are in, and consequently needs to know where each particle is.) The velocity $\mathbf v_i$, on the other hand, is of no concern to Particles::ParticleHandler and consequently we will store it as a "property" of each particle that we will update in each time step. Properties can also be used to store any other quantity we might care about each particle: its charge, or if they were larger than just an electron, its color, mass, attitude in space, chemical composition, etc.
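As an illustration of this split between positions and velocity "properties", here is a small, self-contained C++ sketch of a simple Euler-type time stepping for such particles. It is plain C++, not the Particles::ParticleHandler interface, and the field evaluation is a stand-in for interpolating $\mathbf E$ from the finite element solution:

#include <array>
#include <vector>

int main()
{
  using Point = std::array<double, 2>;

  struct Particle
  {
    Point x; // position: what the particle handler itself stores
    Point v; // velocity: what we would store as a per-particle "property"
  };

  const double e_over_m = -1.76e11; // electron charge-to-mass ratio [C/kg]
  const double dt       = 1e-12;    // time step [s]

  // Stand-in for evaluating the electric field at a point; in the program,
  // this would interpolate the finite element solution for V.
  auto E = [](const Point &) -> Point { return {{1e5, 0.}}; };

  std::vector<Particle> particles(10, Particle{{{0., 0.}}, {{0., 0.}}});

  for (unsigned int step = 0; step < 1000; ++step)
    for (Particle &p : particles)
      {
        const Point field = E(p.x);
        for (unsigned int d = 0; d < 2; ++d)
          {
            p.v[d] += dt * e_over_m * field[d]; // dv/dt = (e/m) E
            p.x[d] += dt * p.v[d];              // dx/dt = v, using updated v
          }
      }
}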

        There remain two things to discuss to complete the model: Where particles start and what the charge density $\rho$ is.

        -

        First, historically, cathode rays used very large electric fields to pull electrons out of the metal. This produces only a relatively small current. One can do better by heating the cathode: a statistical fraction of electrons in that case have enough thermal energy to leave the metal; the electric field then just has to be strong enough to pull them away from the attraction of their host body. We will model this in the following way: We will create a new particle if (i) the electric field points away from the electrode, i.e., if $\mathbf E \cdot \mathbf n < 0$ where $\mathbf n$ is the normal vector at a face pointing out of the domain (into the electrode), and (ii) the electric field exceeds a threshold value $|\mathbf E|\ge E_\text{threshold}$. This is surely not a sufficiently accurate model for what really happens, but is good enough for our current tutorial program.
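        In code, the emission test for conditions (i) and (ii) might look like the following sketch (a hypothetical fragment, not the program's actual implementation; fe_face_values, solution, n_face_q_points, and E_threshold are assumed to be set up elsewhere):

        std::vector<Tensor<1, dim>> E_values(n_face_q_points);
        fe_face_values.reinit(cell, face);
        fe_face_values.get_function_gradients(solution, E_values); // E = grad V

        for (unsigned int q = 0; q < n_face_q_points; ++q)
          if ((E_values[q] * fe_face_values.normal_vector(q) < 0) && // (i)
              (E_values[q].norm() >= E_threshold))                   // (ii)
            {
              // Both conditions hold: create a particle near this point.
            }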

        Second, in principle we would have to model the charge density via

        \[
   \rho(\mathbf x) = \sum_i e\delta(\mathbf x-\mathbf x_i).
\]


        The issue now is that in reality, a cathode ray tube in an old television yields a current of somewhere around a few milli-Amperes. In the much higher energy beams of particle accelerators, the current may only be a few nano-Amperes. But an Ampere is $6\times 10^{18}$ electrons flowing per second. Now, as you will see in the results section, we really only simulate a few microseconds ( $10^{-6}$ seconds), but that still results in very large numbers of electrons – far more than we can hope to simulate with a program as small as the current one. As a consequence, let us presume that each particle represents $N$ electrons. Then the particle mass and charge are also $Nm$ and $Ne$ and the equations we have to solve are

        \[
   (Nm) {\ddot {\mathbf x}}_i = (Ne)\mathbf E,
\]


        which is of course exactly the same as above after dividing both sides by $N$. On the other hand, the charge density for these "clumps" of electrons is given by

        \[
   \rho(\mathbf x) = \sum_i (Ne)\delta(\mathbf x-\mathbf x_i).
\]


        It is this form that we will implement in the program, where $N$ is chosen rather large in the program to ensure that the particles actually affect the electric field. (This may not be realistic in practice: In most cases, there are just not enough electrons to actually affect the overall electric field. But realism is not our goal here.)


        As a final thought about the model, one may wonder why the equation for the electric field (or, rather, the electric potential) has no time derivative whereas the equations for the electron positions do. In essence, this is a modeling assumption: We assume that the particles move so slowly that at any given time the electric field is in equilibrium. This is saying, in other words, that the velocity of the electrons is much less than the speed of light. In yet other words, we can rephrase this in terms of the electrode voltage $V_0$: Since every volt of electric potential accelerates electrons by approximately 600 km/s (neglecting relativistic effects), requiring $\|\mathbf v_i\|\ll c$ is equivalent to saying that $2V_0 \ll 500 \text{V}$ – at that rate, a voltage difference of $500 \text{V}$ would already produce velocities around $c$. Under this assumption (and the assumption that the total number of electrons is small), one can also neglect the creation of magnetic fields by the moving charges, which would otherwise also affect the movement of the electrons.


        Time discretization

        The equations outlined above then form a set of coupled differential equations. Let us bring them all together in one place again to make that clear:

        \begin{align*}
   -\epsilon_0 \Delta V &= \sum_i e\delta(\mathbf x-\mathbf x_i)
   \\
   {\dot {\mathbf x}}_i &= {\mathbf v}_i,
   \\
   {\dot {\mathbf v}}_i &= \frac{e\mathbf E}{m} = \frac{e\nabla V}{m}.
\end{align*}

        Because of the awkward dependence of the electric potential on the particle locations, we don't want to solve this as a coupled system but instead use a decoupled approach where we first solve for the potential in each time step and then the particle locations. (One could also do it the other way around, of course.) This is very much in the same spirit as we do in step-21, step-31, and step-32, to name just a few, and can all be understood in the context of the operator splitting methods discussed in step-58.


        So, if we denote by an upper index $n$ the time step, and if we use a simple time discretization for the ODE, then this means that we have to solve the following set of equations in each time step:

        \begin{align*}
   -\epsilon_0 \Delta V^{(n)} &= \sum_i e\delta(\mathbf x-\mathbf x_i^{(n-1)})
   \\
   \frac{{\mathbf v}_i^{(n)}-{\mathbf v}_i^{(n-1)}}{\Delta t} &= \frac{e\nabla V^{(n)}}{m}
   \\
   \frac{{\mathbf x}_i^{(n)}-{\mathbf x}_i^{(n-1)}}{\Delta t} &= {\mathbf v}_i^{(n)}.
\end{align*}


        This scheme can be understood in the framework of operator splitting methods (specifically, the "Lie splitting" method) wherein a coupled system is solved by updating one variable at a time, using either the old values of other variables (e.g., using $\mathbf x_i^{(n-1)}$ in the first equation) or the values of variables that have already been updated in a previous sub-step (e.g., using $V^{(n)}$ in the second equation). There are of course many better ways to do a time discretization (for example the simple leapfrog scheme when updating the velocity, or more general Strang splitting methods for the coupled system) but this isn't the point of the tutorial program, and so we will be content with what we have here. (We will comment on a piece of this puzzle in the possibilities for extensions section of this program, however.)
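        Schematically, one time step of this splitting then consists of the following sequence (all function names here are placeholders for what a program along these lines might do, not an actual interface):

        while (time < end_time)
          {
            solve_field();            // compute V^(n) using the locations x_i^(n-1)
            dt = compute_time_step(); // based on v_i^(n-1), see the discussion below
            update_velocities(dt);    // v_i^(n) from e grad V^(n) / m
            move_particles(dt);       // x_i^(n) = x_i^(n-1) + dt v_i^(n)
            time += dt;
          }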


        There remains the question of how we should choose the time step size $\Delta t$. The limitation here is that the Particles::ParticleHandler class needs to keep track of which cell each particle is in. This is particularly an issue if we are running computations in parallel (say, in step-70) because in that case every process only stores those cells it owns plus one layer of "ghost cells". That's not relevant here, but in general we should make sure that over the course of each time step, a particle moves only from one cell to any of its immediate neighbors (face, edge, or vertex neighbors). If we can ensure that, then Particles::ParticleHandler is guaranteed to be able to figure out which cell a particle ends up in. To do this, a useful rule of thumb is that we should choose the time step so that for all particles the expected distance the particle moves by is less than one cell diameter:

        \[
   \Delta t \le \frac{h_i}{\|\mathbf v_i\|} \qquad\qquad \forall i,
\]

        or equivalently

        \[
   \Delta t \le \min_i \frac{h_i}{\|\mathbf v_i\|}.
\]

        Here, $h_i$ is the length of the shortest edge of the cell on which particle $i$ is located – in essence, a measure of the size of a cell.

        On the other hand, a particle might already be at the boundary of one cell and the neighboring cell might be once further refined. So then the time to cross that neighboring cell would actually be half the amount above, suggesting

        \[
   \Delta t \le \min_i \frac{\tfrac 12 h_i}{\|\mathbf v_i\|}.
\]

        But even that is not good enough: The formula above updates the particle positions in each time step using the formula

        \[
 \frac{{\mathbf x}_i^{(n)}-{\mathbf x}_i^{(n-1)}}{\Delta t} = {\mathbf v}_i^{(n)},
\]


        that is, using the current velocity ${\mathbf v}_i^{(n)}$. But we don't have the current velocity yet at the time when we need to choose $\Delta t$ – which is after we have updated the potential $V^{(n)}$ but before we update the velocity from ${\mathbf v}_i^{(n-1)}$ to ${\mathbf v}_i^{(n)}$. All we have is ${\mathbf v}_i^{(n-1)}$. So we need an additional safety factor for our final choice:

        \[
   \Delta t^{(n)} =
   c_\text{safety} \min_i \frac{\tfrac 12 h_i}{\|\mathbf v_i^{(n-1)}\|}.
\]


        How large should $c_\text{safety}$ be? That depends on how much of an underestimate $\|\mathbf v_i^{(n-1)}\|$ might be compared to $\|\mathbf v_i^{(n)}\|$, and that is actually quite easy to assess: A particle created in one time step with zero velocity will roughly pick up equal velocity increments in each successive time step if the electric field it encounters along the way is roughly constant. So the maximal difference between $\|\mathbf v_i^{(n-1)}\|$ and $\|\mathbf v_i^{(n)}\|$ would be a factor of two. As a consequence, we will choose $c_\text{safety}=0.5$.
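        Expressed in code, the criterion could then look like the following sketch (again not the program's literal code: velocities are assumed to sit in the first dim property slots as above, and get_surrounding_cell() stands in for however one obtains the cell a particle lives on; note the two factors of one half, one for $c_\text{safety}$ and one for the possibly refined neighbor):

        double dt = std::numeric_limits<double>::max();
        for (auto &particle : particle_handler)
          {
            // Reconstruct the particle velocity from its properties.
            const ArrayView<double> properties = particle.get_properties();
            Tensor<1, dim> v;
            for (unsigned int d = 0; d < dim; ++d)
              v[d] = properties[d];

            const double h =
              particle.get_surrounding_cell()->minimum_vertex_distance();
            if (v.norm() > 0)
              dt = std::min(dt, 0.5 * (0.5 * h) / v.norm());
          }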

        There is only one other case we ought to consider: What happens in the very first time step? There, any particles to be moved along have just been created, but they have a zero velocity. So we don't know what velocity we should choose for them. Of course, in all other time steps there are also particles that have just been created, but in general, the particles with the highest velocity limit the time step size and so the newly created particles with their zero velocity don't matter. But what if we only have such particles?


        In that case, we can use the following approximation: If a particle starts at $\mathbf v^{(0)}=0$, then the update formula tells us that

        \[
   {\mathbf v}_i^{(1)} = \frac{e\nabla V^{(1)}}{m} \Delta t,
\]

        and consequently

        \[
     \frac{{\mathbf x}_i^{(1)}-{\mathbf x}_i^{(0)}}{\Delta t} = {\mathbf v}_i^{(1)},
\]

        which we can write as

        \[
     {\mathbf x}_i^{(1)} - {\mathbf x}_i^{(0)} = \frac{e\nabla V^{(1)}}{m} \Delta t^2.
\]


        Not wanting to move a particle by more than $\frac 12 h_i$ then implies that we should choose the time step as

        \[
   \Delta t
   \le
   \min_i
   \sqrt{ \frac{h_i m}{e \|\nabla V^{(1)}\| }}.
\]

        Using the same argument about neighboring cells possibly being smaller by a factor of two then leads to the final formula for time step zero:

        \[
   \Delta t
   =
   \min_i
   \sqrt{ \frac{\frac 12 h_i m}{e \|\nabla V^{(1)}\| } }.
\]


        Strictly speaking, we would have to evaluate the electric potential $V^{(1)}$ at the location of each particle, but a good enough approximation is to use the maximum of the values at the vertices of the respective cell. (Why the vertices and not the midpoint? Because the gradient of the solution of the Laplace equation, i.e., the electric field, is largest in corner singularities which are located at the vertices of cells.) This has the advantage that we can make good use of the FEValues functionality which can recycle pre-computed material as long as the quadrature points are the same from one cell to the next.

        We could always run this kind of scheme to estimate the difference between $\mathbf v_i^{(n-1)}$ and $\mathbf v_i^{(n)}$, but it relies on evaluating the electric field $\mathbf E$ on each cell, and that is expensive. As a consequence, we will limit this approach to the very first time step.

        Spatial discretization

        Having discussed the time discretization, the discussion of the spatial discretization is going to be short: We use quadratic finite elements, i.e., the space $Q_2$, to approximate the electric potential $V$. The mesh is adapted a couple of times during the initial time step. All of this is entirely standard if you have read step-6, and the implementation does not provide for any kind of surprise.

        Dealing with particles programmatically

        Adding and moving particles is, in practice, not very difficult in deal.II. To add one, the create_particles() function of this program simply uses a code snippet of the following form:

        Particles::Particle<dim> new_particle;
        new_particle.set_location(location);
        new_particle.set_reference_location(
          mapping.transform_real_to_unit_cell(cell, location));
        new_particle.set_id(next_unused_particle_id);
        particle_handler.insert_particle(new_particle, cell);
/usr/share/doc/packages/dealii/doxygen/deal.II/step_2.html differs (HTML document, UTF-8 Unicode text, with very long lines)

      Introduction

      Note
      The material presented here is also discussed in video lecture 9. (All video lectures are also available here.)

      The finite element method is based on approximating the solution $u$ of a differential equation such as $-\Delta u=f$ by a function $u_h$ that is "piecewise" polynomial; that is, we subdivide the domain $\Omega$ on which the equation is posed into small cells that in the documentation we will generally denote by the symbol $K$. On each cell $K$, the approximating function $u_h$ we seek is then a polynomial. (Or, strictly speaking, a function that is the image of a polynomial from a "reference cell", but let us not make things more complicated than necessary for now.)

      In the previous tutorial program (in step-1), we showed how we should think of the subdivision of the domain into cells as a "mesh" represented by the Triangulation class, and what this looks like in code. In the current tutorial program, we now show how one represents piecewise polynomial functions through the concept of degrees of freedom defined on this mesh. For this example, we will use the lowest order ( $Q_1$) finite elements, that is, the approximating function $u_h$ we are looking for will be "bi-linear" on each quadrilateral cell $K$ of the mesh. (They would be linear if we worked on triangles.)

      In practice, we represent the function as a linear combination of shape functions $\varphi_j(\mathbf x)$ with multipliers $U_j$ that we call the "degrees of freedom". For the bi-linear functions we consider here, each of these shape functions and degrees of freedom is associated with a vertex of the mesh. Later examples will demonstrate higher order elements where degrees of freedom are not necessarily associated with vertices any more, but can be associated with edges, faces, or cells.

      The term "degree of freedom" is commonly used in the finite element community to indicate two slightly different, but related things. The first is that we'd like to represent the finite element solution as a linear combination of shape functions, in the form $u_h(\mathbf x) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf x)$. Here, $U_j$ is a vector of expansion coefficients. Because we don't know their values yet (we will compute them as the solution of a linear or nonlinear system), they are called "unknowns" or "degrees of freedom". The second meaning of the term can be explained as follows: A mathematical description of finite element problems is often to say that we are looking for a finite dimensional function $u_h \in V_h$ that satisfies some set of equations (e.g. $a(u_h,\varphi_h)=(f,\varphi_h)$ for all test functions $\varphi_h\in V_h$). In other words, all we say here is that the solution needs to lie in some space $V_h$. However, to actually solve this problem on a computer we need to choose a basis of this space; this is the set of shape functions $\varphi_j(\mathbf x)$ we have used above in the expansion of $u_h(\mathbf x)$ with coefficients $U_j$. There are of course many bases of the space $V_h$, but we will specifically choose the one that is described by the finite element functions that are traditionally defined locally on the cells of the mesh.

      Enumerating degrees of freedom

      Describing "degrees of freedom" in this context requires us to simply enumerate the basis functions of the space $V_h$. For $Q_1$ elements this means simply enumerating the vertices of the mesh in some way, but for higher order elements, one also has to enumerate the shape functions that are associated with edges, faces, or cell interiors of the mesh. In other words, the enumeration of degrees of freedom is an entirely separate thing from the indices we use for vertices. The class that provides this enumeration of the basis functions of $V_h$ is called DoFHandler.

      Defining degrees of freedom ("DoF"s in short) on a mesh is, in practice, a rather simple task, since the library does all the work for you. Essentially, all you have to do is create a finite element object (from one of the many finite element classes deal.II already has, see for example the Finite element space descriptions documentation) and give it to a DoFHandler object through the DoFHandler::distribute_dofs() function ("distributing DoFs" is the term we use to describe the process of enumerating the basis functions as discussed above). The DoFHandler is a class that knows which degrees of freedom live where, i.e., it can answer questions like "how many degrees of freedom are there globally" and "on this cell, give me the global indices of the shape functions that live here". This is the sort of information you need when determining how big your system matrix should be, and when copying the contributions of a single cell into the global matrix.
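      In its shortest form, this entire process might look as follows: a minimal, self-contained sketch in which the mesh (a three times refined unit square) is merely an example choice:

      #include <deal.II/grid/tria.h>
      #include <deal.II/grid/grid_generator.h>
      #include <deal.II/dofs/dof_handler.h>
      #include <deal.II/fe/fe_q.h>
      #include <iostream>

      using namespace dealii;

      int main()
      {
        Triangulation<2> triangulation;
        GridGenerator::hyper_cube(triangulation);
        triangulation.refine_global(3);

        FE_Q<2> fe(1); // the lowest order (bi-linear) element discussed above
        DoFHandler<2> dof_handler(triangulation);
        dof_handler.distribute_dofs(fe); // enumerate the basis functions

        std::cout << "Number of degrees of freedom: " << dof_handler.n_dofs()
                  << std::endl;
      }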

      The first task of the current program is therefore to take a mesh and a finite element, and enumerate the degrees of freedom. In the current context, this means simply giving each vertex of the mesh a DoF index. Once that has happened, we will output in a picture which vertex ended up with which DoF index. You can find the corresponding pictures in the results section of this tutorial.


      The next step would then be to compute a matrix and right hand side corresponding to a particular differential equation using this finite element and mesh. We will keep this step for the step-3 program and rather talk about one practical aspect of a finite element program, namely that finite element matrices are always very sparse: almost all entries in these matrices are zero.

      To be more precise, we say that a matrix is sparse if the number of nonzero entries per row in the matrix is bounded by a number that is independent of the overall number of degrees of freedom. For example, the simple 5-point stencil of a finite difference approximation of the Laplace equation leads to a sparse matrix since the number of nonzero entries per row is five, and therefore independent of the total size of the matrix. For more complicated problems – say, the Stokes problem of step-22 – and in particular in 3d, the number of entries per row may be several hundred. But the important point is that this number is independent of the overall size of the problem: If you refine the mesh, the maximal number of unknowns per row remains the same.
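      For concreteness, the row of the finite difference matrix that corresponds to an interior grid point $(i,j)$ of the 5-point stencil contains exactly the five entries appearing in

      \[
   \frac{1}{h^2}\left( 4u_{i,j} - u_{i-1,j} - u_{i+1,j} - u_{i,j-1} - u_{i,j+1} \right) = f_{i,j},
\]

      independently of how many grid points there are in total.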

      Sparsity is one of the distinguishing features of the finite element method compared to, say, approximating the solution of a partial differential equation using a Taylor expansion and matching coefficients, or using a Fourier basis.

      In practical terms, it is the sparsity of matrices that enables us to solve problems with millions or billions of unknowns. To understand this, note that a matrix with $N$ rows, each with a fixed upper bound for the number of nonzero entries, requires ${\cal O}(N)$ memory locations for storage, and a matrix-vector multiplication also requires only ${\cal O}(N)$ operations. Consequently, if we had a linear solver that requires only a fixed number of matrix-vector multiplications to come up with the solution of a linear system with this matrix, then we would have a solver that can find the values of all $N$ unknowns with optimal complexity, i.e., with a total of ${\cal O}(N)$ operations. It is clear that this wouldn't be possible if the matrix were not sparse (because then the number of entries in the matrix would have to be ${\cal O}(N^s)$ with some $s>1$, and doing a fixed number of matrix-vector products would take ${\cal O}(N^s)$ operations), but it also requires very specialized solvers such as multigrid methods to satisfy the requirement that the solution requires only a fixed number of matrix-vector multiplications. We will frequently look at the question of what solver to use in the remaining programs of this tutorial.

      The sparsity is generated by the fact that finite element shape functions are defined locally on individual cells, rather than globally, and that the local differential operators in the bilinear form only couple shape functions whose support overlaps. (The "support" of a function is the area where it is nonzero. For the finite element method, the support of a shape function is generally the cells adjacent to the vertex, edge, or face it is defined on.) In other words, degrees of freedom $i$ and $j$ that are not defined on the same cell do not overlap, and consequently the matrix entry $A_{ij}$ will be zero. (In some cases such as the Discontinuous Galerkin method, shape functions may also connect to neighboring cells through face integrals. But finite element methods do not generally couple shape functions beyond the immediate neighbors of a cell on which the function is defined.)

      How degrees of freedom are enumerated

      By default, the DoFHandler class enumerates degrees of freedom on a mesh using an algorithm that is difficult to describe and leads to results that do look right if you know what it is doing but otherwise appears rather random; consequently, the sparsity pattern is also not optimized for any particular purpose. To show this, the code below will demonstrate a simple way to output the "sparsity pattern" that corresponds to a DoFHandler, i.e., an object that represents all of the potentially nonzero elements of a matrix one may build when discretizing a partial differential equation on a mesh and its DoFHandler. This lack of structure in the sparsity pattern will be apparent from the pictures we show below.
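      Condensed to its essentials, producing such a picture can be done along the following lines (a sketch of the steps rather than the verbatim program, with the obvious headers such as deal.II/lac/dynamic_sparsity_pattern.h, deal.II/dofs/dof_tools.h, and fstream included):

      DynamicSparsityPattern dsp(dof_handler.n_dofs(), dof_handler.n_dofs());
      DoFTools::make_sparsity_pattern(dof_handler, dsp); // record possible nonzeros

      SparsityPattern sparsity_pattern;
      sparsity_pattern.copy_from(dsp);

      std::ofstream out("sparsity_pattern.svg");
      sparsity_pattern.print_svg(out); // one mark per potentially nonzero entry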

      For most applications and algorithms, the exact way in which degrees of freedom are numbered does not matter. For example, the Conjugate Gradient method we use to solve linear systems does not care. On the other hand, some algorithms do care: in particular, some preconditioners such as SSOR will work better if they can walk through degrees of freedom in a particular order, and it would be nice if we could just sort them in such a way that SSOR can iterate through them from zero to $N$ in this order. Other examples include computing incomplete LU or Cholesky factorizations, or if we care about the block structure of matrices (see step-20 for an example). deal.II therefore has algorithms that can re-enumerate degrees of freedom in particular ways in namespace DoFRenumbering. Renumbering can be thought of as choosing a different, permuted basis of the finite element space. The sparsity pattern and matrices that result from this renumbering are therefore also simply a permutation of rows and columns compared to the ones we would get without explicit renumbering.

      In the program below, we will use the algorithm of Cuthill and McKee to do so. We will show the sparsity pattern for both the original enumeration of degrees of freedom and of the renumbered version below, in the results section.
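      The renumbering step itself is then a single call, after which one would rebuild the sparsity pattern exactly as before to see the effect:

      DoFRenumbering::Cuthill_McKee(dof_handler); // declared in deal.II/dofs/dof_renumbering.h

      // ... now re-create and output the sparsity pattern as above ...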

      The commented program

      The first few includes are just like in the previous program, so do not require additional comments:


      Renumbering of DoFs

      In the sparsity pattern produced above, the nonzero entries extended quite far off from the diagonal. For some algorithms, for example for incomplete LU decompositions or Gauss-Seidel preconditioners, this is unfavorable, and we will show a simple way to improve this situation.

      Remember that for an entry $(i,j)$ in the matrix to be nonzero, the supports of the shape functions $i$ and $j$ needed to intersect (otherwise in the integral, the integrand would be zero everywhere since either the one or the other shape function is zero at some point). However, the supports of shape functions intersect only if they are adjacent to each other, so in order to have the nonzero entries clustered around the diagonal (where $i$ equals $j$), we would like adjacent shape functions to be numbered with indices (DoF numbers) that differ not too much.

      This can be accomplished by a simple front marching algorithm, where one starts at a given vertex and gives it the index zero. Then, its neighbors are numbered successively, making their indices close to the original one. Then, their neighbors, if not yet numbered, are numbered, and so on.

      One algorithm that adds a little bit of sophistication along these lines is the one by Cuthill and McKee. We will use it in the following function to renumber the degrees of freedom such that the resulting sparsity pattern is more localized around the diagonal. The only interesting part of the function is the first call to DoFRenumbering::Cuthill_McKee, the rest is essentially as before:

        void renumber_dofs(DoFHandler<2> &dof_handler)
/usr/share/doc/packages/dealii/doxygen/deal.II/step_20.html differs (HTML document, UTF-8 Unicode text, with very long lines)

      \begin{eqnarray*}
  -\nabla \cdot K({\mathbf x}) \nabla p &=& f \qquad {\textrm{in}\ } \Omega, \\
  p &=& g \qquad {\textrm{on}\ }\partial\Omega.
\end{eqnarray*}

      $K({\mathbf x})$ is assumed to be uniformly positive definite, i.e., there is $\alpha>0$ such that the eigenvalues $\lambda_i({\mathbf x})$ of $K(x)$ satisfy $\lambda_i({\mathbf x})\ge \alpha$. The use of the symbol $p$ instead of the usual $u$ for the solution variable will become clear in the next section.

      After discussing the equation and the formulation we are going to use to solve it, this introduction will cover the use of block matrices and vectors, the definition of solvers and preconditioners, and finally the actual test case we are going to solve.

      We are going to extend this tutorial program in step-21 to solve not only the mixed Laplace equation, but add another equation that describes the transport of a mixture of two fluids.

      The equations covered here fall into the class of vector-valued problems. A toplevel overview of this topic can be found in the Handling vector valued problems topic.

      The equations

      In the form above, the Poisson equation (i.e., the Laplace equation with a nonzero right hand side) is generally considered a good model equation for fluid flow in porous media. Of course, one typically models fluid flow through the Navier-Stokes equations or, if fluid velocities are slow or the viscosity is large, the Stokes equations (which we cover in step-22). In the first of these two models, the forces that act are inertia and viscous friction, whereas in the second it is only viscous friction – i.e., forces that one fluid particle exerts on a nearby one. This is appropriate if you have free flow in a large domain, say a pipe, a river, or in the air. On the other hand, if the fluid is confined in pores, then friction forces exerted by the pore walls on the fluid become more and more important and internal viscous friction becomes less and less important. Modeling this then first leads to the Brinkman model if both effects are important, and in the limit of very small pores to the Darcy equations. The latter is just a different name for the Poisson or Laplace equation, connoting it with the area to which one wants to apply it: slow flow in a porous medium. In essence it says that the velocity is proportional to the negative pressure gradient that drives the fluid through the porous medium.

      The Darcy equation models this pressure that drives the flow. (Because the solution variable is a pressure, we here use the name $p$ instead of the name $u$ more commonly used for the solution of partial differential equations.) Typical applications of this view of the Laplace equation are then modeling groundwater flow, or the flow of hydrocarbons in oil reservoirs. In these applications, $K$ is the permeability tensor, i.e., a measure for how much resistance the soil or rock matrix asserts on the fluid flow.

      In the applications named above, a desirable feature for a numerical scheme is that it should be locally conservative, i.e., that whatever flows into a cell also flows out of it (or the difference is equal to the integral over the source terms over each cell, if the sources are nonzero). However, as it turns out, the usual discretizations of the Laplace equation (such as those used in step-3, step-4, or step-6) do not satisfy this property. But, one can achieve this by choosing a different formulation of the problem and a particular combination of finite element spaces.

      Formulation, weak form, and discrete problem

      To this end, one first introduces a second variable, called the velocity, ${\mathbf u}=-K\nabla p$. By its definition, the velocity is a vector in the negative direction of the pressure gradient, multiplied by the permeability tensor. If the permeability tensor is proportional to the unit matrix, this equation is easy to understand and intuitive: the higher the permeability, the higher the velocity; and the velocity is proportional to the gradient of the pressure, going from areas of high pressure to areas of low pressure (thus the negative sign).


      Here, ${\mathbf n}$ is the outward normal vector at the boundary. Note how in this formulation, Dirichlet boundary values of the original problem are incorporated in the weak form.


      To be well-posed, we have to look for solutions and test functions in the space $H({\textrm{div}})=\{{\mathbf w}\in L^2(\Omega)^d:\ {\textrm{div}}\ {\mathbf w}\in L^2\}$ for $\mathbf u$, $\mathbf v$, and $L^2$ for $p,q$. It is a well-known fact stated in almost every book on finite element theory that if one chooses discrete finite element spaces for the approximation of ${\mathbf u},p$ inappropriately, then the resulting discrete problem is unstable and the discrete solution will not converge to the exact solution. (Some details on the problem considered here – which falls in the class of "saddle-point problems" – can be found on the Wikipedia page on the Ladyzhenskaya-Babuska-Brezzi (LBB) condition.)

      To overcome this, a number of different finite element pairs for ${\mathbf u},p$ have been developed that lead to a stable discrete problem. One such pair is to use the Raviart-Thomas spaces $RT(k)$ for the velocity ${\mathbf u}$ and discontinuous elements of class $DQ(k)$ for the pressure $p$. For details about these spaces, we refer in particular to the book on mixed finite element methods by Brezzi and Fortin, but many other books on the theory of finite elements, for example the classic book by Brenner and Scott, also state the relevant results. In any case, with appropriate choices of function spaces, the discrete formulation reads as follows: Find ${\mathbf u}_h,p_h$ so that

      \begin{eqnarray*}
   A(\{{\mathbf u}_h,p_h\},\{{\mathbf v}_h,q_h\}) = F(\{{\mathbf v}_h,q_h\})
   \qquad\qquad \forall {\mathbf v}_h,q_h.
 \end{eqnarray*}

      Before continuing, let us briefly pause and show that the choice of function spaces above provides us with the desired local conservation property. In particular, because the pressure space consists of discontinuous piecewise polynomials, we can choose the test function $q$ as the function that is equal to one on any given cell $K$ and zero everywhere else. If we also choose ${\mathbf v}=0$ everywhere (remember that the weak form above has to hold for all discrete test functions $q,v$), then putting these choices of test functions into the weak formulation above implies in particular that

      \begin{eqnarray*}
   - (1,{\textrm{div}}\ {\mathbf u}_h)_K
   =
   -(f,1)_K,
 \end{eqnarray*}

      If you now recall that ${\mathbf u}$ was the velocity, then the integral on the left is exactly the (discrete) flux across the boundary of the cell $K$. The statement is then that the flux must be equal to the integral over the sources within $K$. In particular, if there are no sources (i.e., $f=0$ in $K$), then the statement is that total flux is zero, i.e., whatever flows into a cell must flow out of it through some other part of the cell boundary. This is what we call local conservation because it holds for every cell.

      On the other hand, the usual continuous $Q_k$ elements would not result in this kind of property when used for the pressure (as, for example, we do in step-43) because one can not choose a discrete test function $q_h$ that is one on a cell $K$ and zero everywhere else: It would be discontinuous and consequently not in the finite element space. (Strictly speaking, all we can say is that the proof above would not work for continuous elements. Whether these elements might still result in local conservation is a different question as one could think that a different kind of proof might still work; in reality, however, the property really does not hold.)

      Assembling the linear system

      The deal.II library (of course) implements Raviart-Thomas elements $RT(k)$ of arbitrary order $k$, as well as discontinuous elements $DQ(k)$. If we forget about their particular properties for a second, we then have to solve a discrete problem

      \begin{eqnarray*}
   A(x_h,w_h) = F(w_h),
 \end{eqnarray*}

      with the bilinear form and right hand side as stated above, and $x_h=\{{\mathbf u}_h,p_h\}$, $w_h=\{{\mathbf v}_h,q_h\}$. Both $x_h$ and $w_h$ are from the space $X_h=RT(k)\times DQ(k)$, where $RT(k)$ is itself a space of $dim$-dimensional functions to accommodate for the fact that the flow velocity is vector-valued. The necessary question then is: how do we do this in a program?

      Vector-valued elements have already been discussed in previous tutorial programs, the first time and in detail in step-8. The main difference there was that the vector-valued space $V_h$ is uniform in all its components: the $dim$ components of the displacement vector are all equal and from the same function space. What we could therefore do was to build $V_h$ as the outer product of the $dim$ times the usual $Q(1)$ finite element space, and by this make sure that all our shape functions have only a single non-zero vector component. Instead of dealing with vector-valued shape functions, all we did in step-8 was therefore to look at the (scalar) only non-zero component and use the fe.system_to_component_index(i).first call to figure out which component this actually is.

      This doesn't work with Raviart-Thomas elements: following from their construction to satisfy certain regularity properties of the space $H({\textrm{div}})$, the shape functions of $RT(k)$ are usually nonzero in all their vector components at once. For this reason, were fe.system_to_component_index(i).first applied to determine the only nonzero component of shape function $i$, an exception would be generated. What we really need to do is to get at all vector components of a shape function. In deal.II diction, we call such finite elements non-primitive, whereas finite elements that are either scalar or for which every vector-valued shape function is nonzero only in a single vector component are called primitive.

      So what do we have to do for non-primitive elements? To figure this out, let us go back in the tutorial programs, almost to the very beginnings. There, we learned that we use the FEValues class to determine the values and gradients of shape functions at quadrature points. For example, we would call fe_values.shape_value(i,q_point) to obtain the value of the ith shape function on the quadrature point with number q_point. Later, in step-8 and other tutorial programs, we learned that this function call also works for vector-valued shape functions (of primitive finite elements), and that it returned the value of the only non-zero component of shape function i at quadrature point q_point.

      For non-primitive shape functions, this is clearly not going to work: there is no single non-zero vector component of shape function i, and the call to fe_values.shape_value(i,q_point) would consequently not make much sense. However, deal.II offers a second function call, fe_values.shape_value_component(i,q_point,comp) that returns the value of the compth vector component of shape function i at quadrature point q_point, where comp is an index between zero and the number of vector components of the present finite element; for example, the element we will use to describe velocities and pressures is going to have $dim+1$ components. It is worth noting that this function call can also be used for primitive shape functions: it will simply return zero for all components except one; for non-primitive shape functions, it will in general return a non-zero value for more than just one component.
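To illustrate, here is a minimal sketch (not code from the program; fe, fe_values, i, and q are assumed to be set up as in any of the tutorial assembly loops) of how one can collect all vector components of a shape function:

std::vector<double> shape_values (fe.n_components());
for (unsigned int comp = 0; comp < fe.n_components(); ++comp)
  shape_values[comp] = fe_values.shape_value_component(i, q, comp);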

      We could now attempt to rewrite the bilinear form above in terms of vector components. For example, in 2d, the first term could be rewritten like this (note that $u_0=x_0, u_1=x_1, p=x_2$):

// (abridged) inside the loops over q, i, and j:
local_matrix(i,j) += ( /* ...sum over all component pairs, ending with... */
                       fe_values.shape_value_component(i,q,1) *
                       fe_values.shape_value_component(j,q,1)) *
                     fe_values.JxW(q);

      This is, at best, tedious, error prone, and not dimension independent. There are obvious ways to make things dimension independent, but in the end, the code is simply not pretty. What would be much nicer is if we could simply extract the ${\mathbf u}$ and $p$ components of a shape function $x_h^i$. In the program we do that in the following way:

      const FEValuesExtractors::Vector velocities (0);
      const FEValuesExtractors::Scalar pressure (dim);
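The elided code then uses these extractor objects to access whole sets of components at once. A minimal sketch of the pattern (loop bounds and names such as local_matrix are assumptions, not verbatim program code):

for (unsigned int q = 0; q < n_q_points; ++q)
  for (unsigned int i = 0; i < dofs_per_cell; ++i)
    {
      // all dim velocity components of shape function i at once:
      const Tensor<1, dim> phi_i_u   = fe_values[velocities].value(i, q);
      const double         div_phi_i_u = fe_values[velocities].divergence(i, q);
      // the pressure component of shape function i:
      const double         phi_i_p   = fe_values[pressure].value(i, q);
      // ... combine these into local_matrix(i, j) and the local right hand side ...
    }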

      You will find the exact same code as above in the sources for the present program. We will therefore not comment much on it below.

      Linear solvers and preconditioners

      After assembling the linear system we are faced with the task of solving it. The problem here is that the matrix possesses two undesirable properties:

  • It is indefinite, i.e., it has both positive and negative eigenvalues. We don't want to prove this property here, but note that this is true for all matrices of the form $\left(\begin{array}{cc} M & B \\ B^T & 0 \end{array}\right)$ such as the one here where $M$ is positive definite.
  • The matrix has a zero block at the bottom right (there is no term in the bilinear form that couples the pressure $p$ with the pressure test function $q$).

      At least it is symmetric, but the first issue above still means that the Conjugate Gradient method is not going to work since it is only applicable to problems in which the matrix is symmetric and positive definite. We would have to resort to other iterative solvers instead, such as MinRes, SymmLQ, or GMRES, that can deal with indefinite systems. However, then the next problem immediately surfaces: Due to the zero block, there are zeros on the diagonal and none of the usual, "simple" preconditioners (Jacobi, SSOR) will work as they require division by diagonal elements.

      For the matrix sizes we expect to run with this program, the by far simplest approach would be to just use a direct solver (in particular, the SparseDirectUMFPACK class that is bundled with deal.II). step-29 goes this route and shows that solving any linear system can be done in just 3 or 4 lines of code.

Written in block form, the linear system reads

\begin{eqnarray*}
  \left(\begin{array}{cc}
    M & B \\ B^T & 0
  \end{array}\right)
  \left(\begin{array}{c}
    U \\ P
  \end{array}\right)
  =
  \left(\begin{array}{c}
    F \\ G
  \end{array}\right),
\end{eqnarray*}

where $U,P$ are the values of velocity and pressure degrees of freedom, respectively, $M$ is the mass matrix on the velocity space, $B^T$ corresponds to the negative divergence operator, and $B$ is its transpose and corresponds to the gradient.

      By block elimination, we can then re-order this system in the following way (multiply the first row of the system by $B^TM^{-1}$ and then subtract the second row from it):

      \begin{eqnarray*}
   B^TM^{-1}B P &=& B^TM^{-1} F - G, \\
   MU &=& F - BP.
 \end{eqnarray*}

Here, the matrix $S=B^TM^{-1}B$ (called the Schur complement of $A$) is obviously symmetric and, owing to the positive definiteness of $M$ and the fact that $B$ has full column rank, $S$ is also positive definite.

Consequently, if we could compute $S$, we could apply the Conjugate Gradient method to it. However, computing $S$ is expensive because it requires us to compute the inverse of the (possibly large) matrix $M$; and $S$ is in fact also a full matrix because even though $M$ is sparse, its inverse $M^{-1}$ will generally be a dense matrix. On the other hand, the CG algorithm doesn't require us to actually have a representation of $S$: It is sufficient to form matrix-vector products with it. We can do so in steps, using the fact that matrix products are associative (i.e., we can set parentheses in such a way that the product is more convenient to compute): To compute $Sv=(B^TM^{-1}B)v=B^T(M^{-1}(Bv))$, we

  1. compute $w = B v$;
  2. solve $My = w$ for $y=M^{-1}w$, using the CG method applied to the positive definite and symmetric mass matrix $M$;
  3. compute $z=B^Ty$ to obtain $z=Sv$.

        Note how we evaluate the expression $B^TM^{-1}Bv$ right to left to avoid matrix-matrix products; this way, all we have to do is evaluate matrix-vector products.


        In the following, we will then have to come up with ways to represent the matrix $S$ so that it can be used in a Conjugate Gradient solver, as well as to define ways in which we can precondition the solution of the linear system involving $S$, and deal with solving linear systems with the matrix $M$ (the second step above).

        Note
        The key point in this consideration is to recognize that to implement an iterative solver such as CG or GMRES, we never actually need the actual elements of a matrix! All that is required is that we can form matrix-vector products. The same is true for preconditioners. In deal.II we encode this requirement by only requiring that matrices and preconditioners given to solver classes have a vmult() member function that does the matrix-vector product. How a class chooses to implement this function is not important to the solver. Consequently, classes can implement it by, for example, doing a sequence of products and linear solves as discussed above.
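As an illustration, here is a minimal sketch of such a class (not the program's actual implementation; it assumes deal.II's SparseMatrix, Vector, SolverCG, and PreconditionIdentity, with $M$ the square velocity mass matrix and $B$ the rectangular top right block):

class SchurComplement
{
public:
  SchurComplement(const SparseMatrix<double> &M, const SparseMatrix<double> &B)
    : M(M), B(B) {}

  void vmult(Vector<double> &dst, const Vector<double> &src) const
  {
    Vector<double> w(M.m()), y(M.m());
    B.vmult(w, src);                            // w = B v
    SolverControl            control(1000, 1e-12);
    SolverCG<Vector<double>> cg(control);
    cg.solve(M, y, w, PreconditionIdentity());  // y = M^{-1} w
    B.Tvmult(dst, y);                           // dst = B^T y = S v
  }

private:
  const SparseMatrix<double> &M, &B;
};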

        The LinearOperator framework in deal.II

        deal.II includes support for describing such linear operations in a very general way. This is done with the LinearOperator class that, like the MatrixType concept, defines a minimal interface for applying a linear operation to a vector:

        std::function<void(Range &, const Domain &)> vmult;

We now have a LinearOperator op_M_inv that we can use to construct more complicated operators such as the Schur complement $S$. Assuming that B is a reference to the upper right block, constructing a LinearOperator op_S is a matter of two lines:

        const auto op_B = linear_operator(B);
        const auto op_S = transpose_operator(op_B) * op_M_inv * op_B;

        Here, the multiplication of three LinearOperator objects yields a composite object op_S whose vmult() function first applies $B$, then $M^{-1}$ (i.e. solving an equation with $M$), and finally $B^T$ to any given input vector. In that sense op_S.vmult() is similar to the following code:

        B.vmult (tmp1, src); // multiply with the top right block: B
        solver_M(M, tmp2, tmp1, preconditioner_M); // multiply with M^-1
        B.Tvmult (dst, tmp2); // multiply with the bottom left block: B^T

(tmp1 and tmp2 are two temporary vectors). The key point behind this approach is the fact that we never actually form the product of the three matrices. Instead, whenever we have to perform a matrix-vector multiplication with op_S we simply run all individual vmult operations in the above sequence.

        Even though both approaches are exactly equivalent, the LinearOperator class has a big advantage over this manual approach. It provides so-called syntactic sugar: Mathematically, we think about $S$ as being the composite matrix $S=B^TM^{-1}B$ and the LinearOperator class allows you to write this out more or less verbatim,
        const auto op_M_inv = inverse_operator(op_M, solver_M, preconditioner_M);
        const auto op_S = transpose_operator(op_B) * op_M_inv * op_B;
        The manual approach on the other hand obscures this fact.

    All that is left for us to do now is to form the right hand sides of the two equations defining $P$ and $U$, and then solve them with the Schur complement matrix and the mass matrix, respectively. For example the right hand side of the first equation reads $B^TM^{-1}F-G$. This could be implemented as follows:

    Vector<double> schur_rhs (P.size());
    Vector<double> tmp (U.size());
    op_M_inv.vmult (tmp, F);
transpose_operator(op_B).vmult (schur_rhs, tmp);
schur_rhs -= G; // complete B^T M^{-1} F - G
    std::function<void(Range &)> apply_add;

    The class allows lazy evaluation of expressions involving vectors and linear operators. This is done by storing the computational expression and only performing the computation when either the object is converted to a vector object, or PackagedOperation::apply() (or PackagedOperation::apply_add()) is invoked by hand. Assuming that F and G are the two vectors of the right hand side we can simply write:

    const auto schur_rhs = transpose_operator(op_B) * op_M_inv * F - G;

    Here, schur_rhs is a PackagedOperation that records the computation we specified. It does not create a vector with the actual result immediately.


    With these prerequisites at hand, solving for $P$ and $U$ is a matter of creating another solver and inverse:

    SolverControl solver_control_S(2000, 1.e-12);
    SolverCG<Vector<double>> solver_S(solver_control_S);
    PreconditionIdentity preconditioner_S;
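With these objects, solving then follows the same pattern as before (a sketch consistent with the calls shown above; the exact lines in the program may differ):

const auto op_S_inv = inverse_operator(op_S, solver_S, preconditioner_S);
P = op_S_inv * schur_rhs;  // solve S P = B^T M^{-1} F - G for the pressure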
The question remains how to precondition this Schur complement solve: why not use the approximate Schur complement matrix

\begin{eqnarray*}
  \tilde S = B^T (\textrm{diag}(M))^{-1} B
\end{eqnarray*}

    as a preconditioner? That would mean that every time we have to do one preconditioning step, we actually have to solve with $\tilde S$. At first, this looks almost as expensive as solving with $S$ right away. However, note that in the inner iteration, we do not have to calculate $M^{-1}$, but only the inverse of its diagonal, which is cheap.


Thankfully, the LinearOperator framework makes this very easy to write out. We already used a Jacobi preconditioner (preconditioner_M) for the $M$ matrix earlier. So all that is left to do is to write out what the approximate Schur complement should look like:

    const auto op_aS =
    transpose_operator(op_B) * linear_operator(preconditioner_M) * op_B;

    Note how this operator differs in simply doing one Jacobi sweep (i.e. multiplying with the inverses of the diagonal) instead of multiplying with the full $M^{-1}$. (This is how a single Jacobi preconditioner step with $M$ is defined: it is the multiplication with the inverse of the diagonal of $M$; in other words, the operation $({\textrm{diag}\ }M)^{-1}x$ on a vector $x$ is exactly what PreconditionJacobi does.)

With all this we almost have the preconditioner completed: it should be the inverse of the approximate Schur complement. We implement this again by creating a linear operator with the inverse_operator() function. This time, however, we would like to choose a relatively modest tolerance for the CG solver (that inverts op_aS). The reasoning is that op_aS is only a coarse approximation to op_S, so we actually do not need to invert it exactly. This, however, creates a subtle problem: preconditioner_S will be used in the final outer CG iteration to create an orthogonal basis. But for this to work, it must be precisely the same linear operation for every invocation. We ensure this by using an IterationNumberControl that allows us to fix the number of CG iterations that are performed to a fixed small number (in our case 30):

    IterationNumberControl iteration_number_control_aS(30, 1.e-18);
    SolverCG<Vector<double>> solver_aS(iteration_number_control_aS);
    PreconditionIdentity preconditioner_aS;
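The preconditioner itself is then obtained with one more inverse_operator() call (a sketch; the variable name op_aS_inv is an assumption):

const auto op_aS_inv = inverse_operator(op_aS, solver_aS, preconditioner_aS);
// op_aS_inv now performs exactly 30 CG iterations with op_aS whenever it is
// applied, and can be passed as the preconditioner when inverting op_S.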

    The next thing is that we want to figure out the sizes of these blocks so that we can allocate an appropriate amount of space. To this end, we call the DoFTools::count_dofs_per_fe_component() function that counts how many shape functions are non-zero for a particular vector component. We have dim+1 vector components, and DoFTools::count_dofs_per_fe_component() will count how many shape functions belong to each of these components.


There is one problem here. As described in the documentation of that function, it wants to put the number of $x$-velocity shape functions into dofs_per_component[0], the number of $y$-velocity shape functions into dofs_per_component[1] (and similar in 3d), and the number of pressure shape functions into dofs_per_component[dim]. But, the Raviart-Thomas element is special in that it is non-primitive, i.e., for Raviart-Thomas elements all velocity shape functions are nonzero in all components. In other words, the function cannot distinguish between $x$ and $y$ velocity functions because there is no such distinction. It therefore puts the overall number of velocity shape functions into each of dofs_per_component[c], $0\le c<\text{dim}$. On the other hand, the number of pressure variables equals the number of shape functions that are nonzero in the dim-th component.

    Using this knowledge, we can get the number of velocity shape functions from any of the first dim elements of dofs_per_component, and then use this below to initialize the vector and matrix block sizes, as well as create output.

    Note
    If you find this concept difficult to understand, you may want to consider using the function DoFTools::count_dofs_per_fe_block() instead, as we do in the corresponding piece of code in step-22. You might also want to read up on the difference between blocks and components in the glossary.
  const std::vector<types::global_dof_index> dofs_per_component =
    DoFTools::count_dofs_per_fe_component(dof_handler);
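For comparison, the block-based alternative mentioned in the note above would be the following (a sketch, assuming the same dof_handler):

const std::vector<types::global_dof_index> dofs_per_block =
  DoFTools::count_dofs_per_fe_block(dof_handler);
// dofs_per_block[0] and dofs_per_block[1] then directly give the sizes of
// the velocity and pressure blocks, without the component subtlety above.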

    Results

    Output of the program and graphical visualization


If we run the program as is, we get this output for the $32\times 32$ mesh we use (for a total of 1024 cells with 1024 pressure degrees of freedom since we use piecewise constants, and 2112 velocities because the Raviart-Thomas element defines one degree of freedom per face and there are $1024 + 32 = 1056$ faces parallel to the $x$-axis and the same number parallel to the $y$-axis):

$ make run
     [ 66%] Built target step-20
     Scanning dependencies of target run
     [100%] Run step-20 with Release configuration
     

As an additional remark, note how the x-velocity in the left image is only continuous in the x-direction, whereas the y-velocity is continuous in the y-direction. The flow fields are discontinuous in the other directions. This very obviously reflects the continuity properties of the Raviart-Thomas elements, which are, in fact, only in the space $H(\textrm{div})$ and not in the space $H^1$. Finally, the pressure field is completely discontinuous, but that should not surprise us given that we have chosen FE_DGQ(0) as the finite element for that solution component.

    Convergence

    The program offers two obvious places where playing and observing convergence is in order: the degree of the finite elements used (passed to the constructor of the MixedLaplaceProblem class from main()), and the refinement level (determined in MixedLaplaceProblem::make_grid_and_dofs). What one can do is to change these values and observe the errors computed later on in the course of the program run.


    If one does this, one finds the following pattern for the $L_2$ error in the pressure variable:

[Table: $L_2$ error in the pressure as a function of mesh refinement, for finite element orders 0, 1, and 2; the last row gives the observed convergence rates $O(h)$, $O(h^2)$, and $O(h^3)$.]

    The theoretically expected convergence orders are very nicely reflected by the experimentally observed ones indicated in the last row of the table.


    One can make the same experiment with the $L_2$ error in the velocity variables:

/usr/share/doc/packages/dealii/doxygen/deal.II/step_21.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    The equations covered here are an extension of the material already covered in step-20. In particular, they fall into the class of vector-valued problems. A toplevel overview of this topic can be found in the Handling vector valued problems topic.

    The two phase flow problem

    Modeling of two phase flow in porous media is important for both environmental remediation and the management of petroleum and groundwater reservoirs. Practical situations involving two phase flow include the dispersal of a nonaqueous phase liquid in an aquifer, or the joint movement of a mixture of fluids such as oil and water in a reservoir. Simulation models, if they are to provide realistic predictions, must accurately account for these effects.


    To derive the governing equations, consider two phase flow in a reservoir $\Omega$ under the assumption that the movement of fluids is dominated by viscous effects; i.e. we neglect the effects of gravity, compressibility, and capillary pressure. Porosity will be considered to be constant. We will denote variables referring to either of the two phases using subscripts $w$ and $o$, short for water and oil. The derivation of the equations holds for other pairs of fluids as well, however.

    The velocity with which molecules of each of the two phases move is determined by Darcy's law that states that the velocity is proportional to the pressure gradient:

\begin{eqnarray*}
   \mathbf{u}_{j}
   =
   -\frac{k_{rj}(S)}{\mu_{j}} \mathbf{K} \cdot \nabla p
\end{eqnarray*}


    where $\mathbf{u}_{j}$ is the velocity of phase $j=o,w$, $K$ is the permeability tensor, $k_{rj}$ is the relative permeability of phase $j$, $p$ is the pressure and $\mu_{j}$ is the viscosity of phase $j$. Finally, $S$ is the saturation (volume fraction), i.e. a function with values between 0 and 1 indicating the composition of the mixture of fluids. In general, the coefficients $K, k_{rj}, \mu$ may be spatially dependent variables, and we will always treat them as non-constant functions in the following.

    We combine Darcy's law with the statement of conservation of mass for each phase,

\[
   \textrm{div}\ \mathbf{u}_{j} = q_j,
\]

and sum over the two phases to obtain the pressure equation

\begin{eqnarray*}
 - \nabla \cdot (\mathbf{K}\lambda(S) \nabla p)= q.
\end{eqnarray*}


    Here, $q$ is the sum source term, and

    \[
   \lambda(S) = \frac{k_{rw}(S)}{\mu_{w}}+\frac{k_{ro}(S)}{\mu_{o}}
 \]

    where $\triangle t$ is the length of a time step. Note how we solve the implicit pressure-velocity system that only depends on the previously computed saturation $S^n$, and then do an explicit time step for $S^{n+1}$ that only depends on the previously known $S^n$ and the just computed $\mathbf{u}^{n+1}$. This way, we never have to iterate for the nonlinearities of the system as we would have if we used a fully implicit method. (In a more modern perspective, this should be seen as an "operator splitting" method. step-58 has a long description of the idea behind this.)


    We can then state the problem in weak form as follows, by multiplying each equation with test functions $\mathbf v$, $\phi$, and $\sigma$ and integrating terms by parts:

\begin{eqnarray*}
   \left((\mathbf{K}\lambda(S^n))^{-1} \mathbf{u}^{n+1},\mathbf v\right)_\Omega -
   (p^{n+1}, \nabla\cdot\mathbf v)_\Omega &=&
   - (p^{n+1}, \mathbf n \cdot \mathbf v)_{\partial\Omega},
   \\
   (\nabla \cdot\mathbf{u}^{n+1}, \phi)_\Omega &=& (q^{n+1},\phi)_\Omega
\end{eqnarray*}


    Note that in the first term, we have to prescribe the pressure $p^{n+1}$ on the boundary $\partial\Omega$ as boundary values for our problem. $\mathbf n$ denotes the unit outward normal vector to $\partial K$, as usual.

    For the saturation equation, we obtain after integrating by parts

    \begin{eqnarray*}
   (S^{n+1}, \sigma)_\Omega
@@ -306,7 +306,7 @@
 </p>
 <p> We introduce an object of type <a class=DiscreteTime in order to keep track of the current value of time and time step in the code. This class encapsulates many complexities regarding adjusting time step size and stopping at a specified final time.

    Space discretization


    In each time step, we then apply the mixed finite method of step-20 to the velocity and pressure. To be well-posed, we choose Raviart-Thomas spaces $RT_{k}$ for $\mathbf{u}$ and discontinuous elements of class $DGQ_{k}$ for $p$. For the saturation, we will also choose $DGQ_{k}$ spaces.

    Since we have discontinuous spaces, we have to think about how to evaluate terms on the interfaces between cells, since discontinuous functions are not really defined there. In particular, we have to give a meaning to the last term on the left hand side of the saturation equation. To this end, let us define that we want to evaluate it in the following sense:

\begin{eqnarray*}
   &&\left(F(S^n) (\mathbf n \cdot \mathbf{u}^{n+1}), \sigma\right)_{\partial K}
   \\
   &&\qquad \dealcoloneq
   \left(F(S^n_+) (\mathbf n \cdot \mathbf{u}^{n+1}_+), \sigma\right)_{\partial K_+}
   +
   \left(F(S^n_-) (\mathbf n \cdot \mathbf{u}^{n+1}_-), \sigma\right)_{\partial K_-},
\end{eqnarray*}
where $\partial K_{-} \dealcoloneq \{x\in \partial K, \mathbf{u}(x) \cdot \mathbf{n}<0\}$ denotes the inflow boundary and $\partial K_{+} \dealcoloneq \{\partial K \setminus \partial K_{-}\}$ is the outflow part of the boundary. The quantities $S_+,\mathbf{u}_+$ then correspond to the values of these variables on the present cell, whereas $S_-,\mathbf{u}_-$ (needed on the inflow part of the boundary of $K$) are quantities taken from the neighboring cell. Some more context on discontinuous element techniques and evaluation of fluxes can also be found in step-12.

    Linear solvers


    The linear solvers used in this program are a straightforward extension of the ones used in step-20 (but without LinearOperator). Essentially, we simply have to extend everything from two to three solution components. If we use the discrete spaces mentioned above and put shape functions into the bilinear forms, we arrive at the following linear system to be solved for time step $n+1$:

\[
 \left(
 \begin{array}{ccc}
 M^u(S^n) & B^{T} & 0 \\
 B        & 0     & 0 \\
 \triangle t\; H & 0 & M^{S}
 \end{array}
 \right)
 \left(
 \begin{array}{c}
 \mathbf{U}^{n+1} \\ P^{n+1} \\ S^{n+1}
 \end{array}
 \right)
 =
 \left(
 \begin{array}{c}
 F_{1} \\ F_{2} \\ F_{3}
 \end{array}
 \right)
\]


    where the individual matrices and vectors are defined as follows using shape functions $\mathbf v_i$ (of type Raviart Thomas $RT_k$) for velocities and $\phi_i$ (of type $DGQ_k$) for both pressures and saturations:

\begin{eqnarray*}
 M^u(S^n)_{ij} &=&
 \left((\mathbf{K}\lambda(S^n))^{-1} \mathbf{v}_i,\mathbf{v}_j\right)_\Omega,
 \\
 &\vdots&
 \\
 (F_{3})_i &=&
 (S^n,\phi_i)_\Omega +\triangle t \sum_K  \left(F(S^n) q^{n+1}, \phi_i\right)_K.
\end{eqnarray*}

    Note
Due to historical accidents, the roles of the matrices $B$ and $B^T$ are reversed in this program compared to step-20. In other words, here $B$ refers to the divergence and $B^T$ to the gradient operator, whereas it was the other way around in step-20.

    The system above presents a complication: Since the matrix $H_{ij}$ depends on $\mathbf u^{n+1}$ implicitly (the velocities are needed to determine which parts of the boundaries $\partial K$ of cells are influx or outflux parts), we can only assemble this matrix after we have solved for the velocities.

    The solution scheme then involves the following steps:

1. Solve for the pressure $p^{n+1}$ using the Schur complement technique introduced in step-20.
2. Solve for the velocity $\mathbf u^{n+1}$, as also discussed in step-20.
3. Compute the term $F_3-\triangle t\; H \mathbf u^{n+1}$, using the just computed velocities.
4. Solve for the saturation $S^{n+1}$.

      Note
Coming back to this testcase in step-43 several years later revealed an oddity in its setup. To this end, consider that we can rewrite the advection equation for the saturation as $S_{t} + (\mathbf{u} F'(S)) \cdot \nabla S = 0$. Now, at the initial time, we have $S=0$, and with the given choice of function $F(S)$, we happen to have $F'(0)=0$. In other words, at $t=0$, the equation reduces to $S_t=0$ for all $\mathbf x$, so the saturation is zero everywhere and it is going to stay zero everywhere! This is despite the fact that $\mathbf u$ is not necessarily zero: the combined fluid is moving, but we've chosen our partial flux $F(S)$ in such a way that infinitesimal amounts of wetting fluid also only move at infinitesimal speeds (i.e., they stick to the medium more than the non-wetting phase in which they are embedded). That said, how can we square this with the knowledge that wetting fluid is invading from the left, leading to the flow patterns seen in the results section? That's where we get into mathematics: Equations like the transport equation we are considering here have infinitely many solutions, but only one of them is physical: the one that results from the so-called viscosity limit, called the viscosity solution. The thing is that with discontinuous elements we arrive at this viscosity limit because using a numerical flux introduces a finite amount of artificial viscosity into the numerical scheme. On the other hand, in step-43, we use an artificial viscosity that is proportional to $\|\mathbf u F'(S)\|$ on every cell, which at the initial time is zero. Thus, the saturation there is zero and remains zero; the solution we then get is one solution of the advection equation, but the method does not converge to the viscosity solution without further changes. We will therefore use a different initial condition in that program.

      Finally, to come back to the description of the testcase, we will show results for computations with the two permeability functions introduced at the end of the results section of step-20:

      • A function that models a single, winding crack that snakes through the domain. In analogy to step-20, but taking care of the slightly different geometry we have here, we describe this by the following function:

• A function that models a randomly perturbed medium, defined as

\begin{eqnarray*}
  k(\mathbf x)
  =
  \min \left\{ \max \left\{ \sum_{i=1}^N \sigma_i(\mathbf x), 0.01 \right\}, 4 \right\},
  \qquad
  \sigma_i(\mathbf x) = e^{-\left(\frac{|\mathbf{x}-\mathbf{x}_i|}{0.05}\right)^2},
\end{eqnarray*}

  where the centers $\mathbf{x}_i$ are $N$ randomly chosen locations inside the domain. This function models a domain in which there are $N$ centers of higher permeability (for example where rock has cracked) embedded in a matrix of more pristine, unperturbed background rock. Note that here we have cut off the permeability function both above and below to ensure a bounded condition number.

      The commented program

This program is an adaptation of step-20 and includes some techniques of DG methods from step-12. A good part of the program is therefore very similar to step-20 and we will not comment again on these parts. Only the new stuff will be discussed in more detail.

2. project_back_saturation resets all saturation degrees of freedom with values less than zero to zero, and all those with saturations greater than one to one (see the sketch below).
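A minimal sketch of what project_back_saturation does (assuming the solution is stored in a Vector<double> named saturation_solution; the program's actual loop may be written differently):

for (double &s : saturation_solution)
  s = std::min(1.0, std::max(0.0, s));  // clamp each value into [0, 1]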
      The rest of the class should be pretty much obvious. The viscosity variable stores the viscosity $\mu$ that enters several of the formulas in the nonlinear equations. The variable time keeps track of the time information within the simulation.

        template <int dim>
        class TwoPhaseFlowProblem
        {

      TwoPhaseFlowProblem class implementation

Here now the implementation of the main class. Much of it is actually copied from step-20, so we won't comment on it in much detail. You should try to get familiar with that program first; then most of what is happening here should be clear.

      TwoPhaseFlowProblem::TwoPhaseFlowProblem


      First for the constructor. We use $RT_k \times DQ_k \times DQ_k$ spaces. For initializing the DiscreteTime object, we don't set the time step size in the constructor because we don't have its value yet. The time step size is initially set to zero, but it will be computed before it is needed to increment time, as described in a subsection of the introduction. The time object internally prevents itself from being incremented when $dt = 0$, forcing us to set a non-zero desired size for $dt$ before advancing time.

        template <int dim>
        TwoPhaseFlowProblem<dim>::TwoPhaseFlowProblem(const unsigned int degree)
        : degree(degree)
        fe_values.get_function_values(solution, present_solution_values);
       

First for the cell terms. These are, following the formulas in the introduction, $(S^n,\sigma)-(F(S^n) \mathbf{v}^{n+1},\nabla \sigma)$, where $\sigma$ is the saturation component of the test function:

        for (unsigned int q = 0; q < n_q_points; ++q)
        for (unsigned int i = 0; i < dofs_per_cell; ++i)
        {

      The main function


      That's it. In the main function, we pass the degree of the finite element space to the constructor of the TwoPhaseFlowProblem object. Here, we use zero-th degree elements, i.e. $RT_0\times DQ_0 \times DQ_0$. The rest is as in all the other programs.

        int main()
        {
        try
      ...

      As we can see, the time step is pretty much constant right from the start, which indicates that the velocities in the domain are not strongly dependent on changes in saturation, although they certainly are through the factor $\lambda(S)$ in the pressure equation.

      Our second observation is that the number of CG iterations needed to solve the pressure Schur complement equation drops from 22 to 17 between the first and the second time step (in fact, it remains around 17 for the rest of the computations). The reason is actually simple: Before we solve for the pressure during a time step, we don't reset the solution variable to zero. The pressure (and the other variables) therefore have the previous time step's values at the time we get into the CG solver. Since the velocities and pressures don't change very much as computations progress, the previous time step's pressure is actually a good initial guess for this time step's pressure. Consequently, the number of iterations we need once we have computed the pressure once is significantly reduced.


The final observation concerns the number of iterations needed to solve for the saturation, namely one. This shouldn't surprise us too much: the matrix we have to solve with is the mass matrix. However, this is the mass matrix for the $DGQ_0$ element of piecewise constants where no element couples with the degrees of freedom on neighboring cells. The matrix is therefore diagonal, and it is clear that we should be able to invert it in a single CG iteration.

      With all this, here are a few movies that show how the saturation progresses over time. First, this is for the single crack model, as implemented in the SingleCurvingCrack::KInverse class:


      As can be seen, the water rich fluid snakes its way mostly along the high-permeability zone in the middle of the domain, whereas the rest of the domain is mostly impermeable. This and the next movie are generated using n_refinement_steps=7, leading to a $128\times 128$ mesh with some 16,000 cells and about 66,000 unknowns in total.

      The second movie shows the saturation for the random medium model of class RandomMedium::KInverse, where we have randomly distributed centers of high permeability and fluid hops from one of these zones to the next:

      Finally, here is the same situation in three space dimensions, on a mesh with n_refinement_steps=5, which produces a mesh of some 32,000 cells and 167,000 degrees of freedom:


      To repeat these computations, all you have to do is to change the line

      TwoPhaseFlowProblem<2> two_phase_flow_problem(0);

      in the main function to

      TwoPhaseFlowProblem<3> two_phase_flow_problem(0);

      The visualization uses a cloud technique, where the saturation is indicated by colored but transparent clouds for each cell. This way, one can also see somewhat what happens deep inside the domain. A different way of visualizing would have been to show isosurfaces of the saturation evolving over time. There are techniques to plot isosurfaces transparently, so that one can see several of them at the same time like the layers of an onion.


      So why don't we show such isosurfaces? The problem lies in the way isosurfaces are computed: they require that the field to be visualized is continuous, so that the isosurfaces can be generated by following contours at least across a single cell. However, our saturation field is piecewise constant and discontinuous. If we wanted to plot an isosurface for a saturation $S=0.5$, chances would be that there is no single point in the domain where that saturation is actually attained. If we had to define isosurfaces in that context at all, we would have to take the interfaces between cells, where one of the two adjacent cells has a saturation greater than and the other cell a saturation less than 0.5. However, it appears that most visualization programs are not equipped to do this kind of transformation.

      Possibilities for extensions

      There are a number of areas where this program can be improved. Three of them are listed below. All of them are, in fact, addressed in a tutorial program that forms the continuation of the current one: step-43.

      Solvers

At present, the program is not particularly fast: the 2d random medium computation took about a day for the 1,000 or so time steps. The corresponding 3d computation took almost two days for 800 time steps. The reason why it isn't faster than this is twofold. First, we rebuild the entire matrix in every time step, although some parts such as the $B$, $B^T$, and $M^S$ blocks never change.

Second, we could do a lot better with the solver and preconditioners. Presently, we solve the Schur complement $B^TM^u(S)^{-1}B$ with a CG method, using $[B^T (\textrm{diag}(M^u(S)))^{-1} B]^{-1}$ as a preconditioner. Applying this preconditioner is expensive, since it involves solving a linear system each time. This may have been appropriate for step-20, where we have to solve the entire problem only once. However, here we have to solve it hundreds of times, and in such cases it is worth considering a preconditioner that is more expensive to set up the first time, but cheaper to apply later on.

One possibility would be to realize that the matrix we use as preconditioner, $B^T (\textrm{diag}(M^u(S)))^{-1} B$, is still sparse, and symmetric on top of that. If one looks at the flow field evolve over time, we also see that while $S$ changes significantly over time, the pressure hardly does and consequently $B^T (\textrm{diag}(M^u(S)))^{-1} B \approx B^T (\textrm{diag}(M^u(S^0)))^{-1} B$. In other words, the matrix for the first time step should be a good preconditioner also for all later time steps. With a bit of back-and-forth, it isn't hard to actually get a representation of it as a SparseMatrix object. We could then hand it off to the SparseMIC class to form a sparse incomplete Cholesky decomposition. To form this decomposition is expensive, but we have to do it only once in the first time step, and can then use it as a cheap preconditioner in the future. We could do even better by using the SparseDirectUMFPACK class that produces not only an incomplete, but a complete decomposition of the matrix, which should yield an even better preconditioner. (This idea is sketched in code below.)

Finally, why use the approximation $B^T (\textrm{diag}(M^u(S)))^{-1} B$ to precondition $B^T M^u(S)^{-1} B$? The latter matrix, after all, is the mixed form of the Laplace operator on the pressure space, for which we use linear elements. We could therefore build a separate matrix $A^p$ on the side that directly corresponds to the non-mixed formulation of the Laplacian, for example using the bilinear form $(\mathbf{K}\lambda(S^n) \nabla \varphi_i,\nabla\varphi_j)$. We could then form an incomplete or complete decomposition of this non-mixed matrix and use it as a preconditioner of the mixed form.
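A minimal sketch of the decompose-once, reuse-often idea mentioned above (the name approximate_schur_matrix is an assumption; building that SparseMatrix is the "bit of back-and-forth" referred to):

SparseDirectUMFPACK schur_preconditioner;
schur_preconditioner.initialize(approximate_schur_matrix); // factorize once,
                                                           // in the first time step
// In all later time steps, reuse the factorization as a cheap preconditioner:
// solver_cg.solve(schur_complement, p, schur_rhs, schur_preconditioner);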

      Using such techniques, it can reasonably be expected that the solution process will be faster by at least an order of magnitude.

      Time stepping

      In the introduction we have identified the time step restriction

\[
   \triangle t_{n+1} \le \frac h{|\mathbf{u}^{n+1}(\mathbf{x})|}
\]


      that has to hold globally, i.e. for all $\mathbf x$. After discretization, we satisfy it by choosing

      \[
   \triangle t_{n+1} = \frac {\min_K h_K}{\max_{\mathbf{x}}|\mathbf{u}^{n+1}(\mathbf{x})|}.
 \]

/usr/share/doc/packages/dealii/doxygen/deal.II/step_22.html differs (HTML document, UTF-8 Unicode text, with very long lines)

This material is based upon work partly supported by the National Science Foundation under Award No. EAR-0426271 and The California Institute of Technology. Any opinions, findings, and conclusions or recommendations expressed in this publication are those of the author and do not necessarily reflect the views of the National Science Foundation or of The California Institute of Technology.

      Introduction

      This program deals with the Stokes system of equations which reads as follows in non-dimensionalized form:

\begin{eqnarray*}
   -2\; \textrm{div}\; \varepsilon(\textbf{u}) + \nabla p &=& \textbf{f},
   \\
   -\textrm{div}\; \textbf{u} &=& 0,
\end{eqnarray*}

where $\textbf u$ denotes the velocity of a fluid, $p$ is its pressure, $\textbf f$ are external forces, and $\varepsilon(\textbf{u})= \nabla^s{\textbf{u}}= \frac 12 \left[ (\nabla \textbf{u}) + (\nabla \textbf{u})^T\right]$ is the rank-2 tensor of symmetrized gradients; a component-wise definition of it is $\varepsilon(\textbf{u})_{ij}=\frac 12\left(\frac{\partial u_i}{\partial x_j} + \frac{\partial u_j}{\partial x_i}\right)$.

      The Stokes equations describe the steady-state motion of a slow-moving, viscous fluid such as honey, rocks in the earth mantle, or other cases where inertia does not play a significant role. If a fluid is moving fast enough that inertia forces are significant compared to viscous friction, the Stokes equations are no longer valid; taking into account inertia effects then leads to the nonlinear Navier-Stokes equations. However, in this tutorial program, we will focus on the simpler Stokes system.

      Note that when deriving the more general compressible Navier-Stokes equations, the diffusion is modeled as the divergence of the stress tensor

\begin{eqnarray*}
   \tau = - \mu \left(2\varepsilon(\textbf{u}) - \frac{2}{3}\nabla \cdot \textbf{u} I\right),
\end{eqnarray*}

where $\mu$ is the viscosity of the fluid. With the assumption of $\mu=1$ (assume constant viscosity and non-dimensionalize the equation by dividing out $\mu$) and assuming incompressibility ( $\textrm{div}\; \textbf{u}=0$), we arrive at the formulation from above:

\begin{eqnarray*}
   \textrm{div}\; \tau = -2\textrm{div}\;\varepsilon(\textbf{u}).
\end{eqnarray*}

      A different formulation uses the Laplace operator ( $-\triangle \textbf{u}$) instead of the symmetrized gradient. A big difference here is that the different components of the velocity do not couple. If you assume additional regularity of the solution $\textbf{u}$ (second partial derivatives exist and are continuous), the formulations are equivalent:

      \begin{eqnarray*}
   \textrm{div}\; \tau
   = -2\textrm{div}\;\varepsilon(\textbf{u})
   = -\triangle \textbf{u} - \nabla \cdot (\nabla\textbf{u})^T
   = -\triangle \textbf{u}.
\end{eqnarray*}

      This is because the $i$th entry of $\nabla \cdot (\nabla\textbf{u})^T$ is given by:

      \begin{eqnarray*}
 [\nabla \cdot (\nabla\textbf{u})^T]_i
 = \sum_j \frac{\partial}{\partial x_j} [(\nabla\textbf{u})^T]_{i,j}
 = \sum_j \frac{\partial}{\partial x_j} [(\nabla\textbf{u})]_{j,i}
 = \sum_j \frac{\partial}{\partial x_j} \frac{\partial u_j}{\partial x_i}
 = \frac{\partial}{\partial x_i} \sum_j \frac{\partial u_j}{\partial x_j}
 = \frac{\partial}{\partial x_i}
   \underbrace{\textrm{div}\; \textbf{u}}_{=0}
 = 0.
\end{eqnarray*}

      If you cannot assume the above-mentioned regularity, or if your viscosity is not a constant, the equivalence no longer holds. Therefore, we decided to stick with the more physically accurate symmetric tensor formulation in this tutorial.

      To be well-posed, we will have to add boundary conditions to the equations. What boundary conditions are readily possible here will become clear once we discuss the weak form of the equations.

      The equations covered here fall into the class of vector-valued problems. A top-level overview of this topic can be found in the Handling vector valued problems topic.

      Weak form

      The weak form of the equations is obtained by writing it in vector form as

      \begin{eqnarray*}
   \begin{pmatrix}
     {-2\; \textrm{div}\; \varepsilon(\textbf{u}) + \nabla p}
     \\
     {-\textrm{div}\; \textbf{u}}
   \end{pmatrix}
   =
   \begin{pmatrix}
   {\textbf{f}}
   \\
   0
   \end{pmatrix},
\end{eqnarray*}

      forming the dot product from the left with a vector-valued test function $\phi = \begin{pmatrix}\textbf{v} \\ q\end{pmatrix}$ and integrating over the domain $\Omega$, yielding the following set of equations:

      \begin{eqnarray*}
   (\textbf{v},
    -2\; \textrm{div}\; \varepsilon(\textbf{u}) + \nabla p)_{\Omega}
   -
   (q,\textrm{div}\; \textbf{u})_{\Omega}
   =
   (\textbf{v}, \textbf{f})_\Omega,
\end{eqnarray*}

      which has to hold for all test functions $\phi = \begin{pmatrix}\textbf{v}
\\ q\end{pmatrix}$.

      A generally good rule of thumb is that if one can reduce how many derivatives are taken on any variable in the formulation, then one should in fact do that using integration by parts. (This is motivated by the theory of partial differential equations, and in particular the difference between strong and weak solutions.) We have already done that for the Laplace equation, where we have integrated the second derivative by parts to obtain the weak formulation that has only one derivative on both test and trial function.

      In the current context, we integrate by parts the second term:

      \begin{eqnarray*}
   (\textbf{v}, -2\; \textrm{div}\; \varepsilon(\textbf{u}))_{\Omega}
   - (\textrm{div}\; \textbf{v}, p)_{\Omega}
   + (\textbf{n}\cdot\textbf{v}, p)_{\partial\Omega}
   -
   (q,\textrm{div}\; \textbf{u})_{\Omega}
   =
   (\textbf{v}, \textbf{f})_\Omega.
\end{eqnarray*}

      Likewise, we integrate by parts the first term to obtain

      \begin{eqnarray*}
   (\nabla \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\Omega}
   -
   (\textbf{n} \otimes \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\partial\Omega}
   - (\textrm{div}\; \textbf{v}, p)_{\Omega}
   + (\textbf{n}\cdot\textbf{v}, p)_{\partial\Omega}
   -
   (q,\textrm{div}\; \textbf{u})_{\Omega}
   =
   (\textbf{v}, \textbf{f})_\Omega,
\end{eqnarray*}

      where the scalar product between two tensor-valued quantities is here defined as

      \begin{eqnarray*}
   (\nabla \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\Omega}
   =
   2 \int_\Omega \sum_{i,j=1}^d \frac{\partial v_j}{\partial x_i}
   \varepsilon(\textbf{u})_{ij} \ dx.
\end{eqnarray*}

      Using this, we have now reduced the requirements on our variables to first derivatives for $\mathbf u,\mathbf v$ and no derivatives at all for $p,q$.

      Because the scalar product between a general tensor like $\nabla\textbf{v}$ and a symmetric tensor like $\varepsilon(\textbf{u})$ equals the scalar product between the symmetrized forms of the two, we can also write the bilinear form above as follows:

      \begin{eqnarray*}
   (\varepsilon(\textbf{v}), 2\; \varepsilon(\textbf{u}))_{\Omega}
   -
   (\textbf{n} \otimes \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\partial\Omega}
   - (\textrm{div}\; \textbf{v}, p)_{\Omega}
   + (\textbf{n}\cdot\textbf{v}, p)_{\partial\Omega}
   -
   (q,\textrm{div}\; \textbf{u})_{\Omega}
   =
   (\textbf{v}, \textbf{f})_\Omega,
\end{eqnarray*}

      We will deal with the boundary terms in the next section, but it is already clear from the domain terms

      \begin{eqnarray*}
   (\varepsilon(\textbf{v}), 2\; \varepsilon(\textbf{u}))_{\Omega}
   - (\textrm{div}\; \textbf{v}, p)_{\Omega}
   -
   (q,\textrm{div}\; \textbf{u})_{\Omega}
\end{eqnarray*}

      of the bilinear form that the Stokes equations yield a symmetric bilinear form, and consequently a symmetric (if indefinite) system matrix.
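
      The symmetry is also visible in how the bilinear form is typically assembled. The following is a minimal sketch of the cell loop, not the tutorial's verbatim code; it assumes an FEValues object fe_values already reinit()ed on the current cell, a FullMatrix local_matrix, and the counters dofs_per_cell and n_q_points:

        const FEValuesExtractors::Vector velocities(0);
        const FEValuesExtractors::Scalar pressure(dim);

        for (unsigned int q = 0; q < n_q_points; ++q)
          for (unsigned int i = 0; i < dofs_per_cell; ++i)
            for (unsigned int j = 0; j < dofs_per_cell; ++j)
              {
                // (eps(v), 2 eps(u)) - (div v, p) - (q, div u), weighted by JxW
                local_matrix(i, j) +=
                  (2 * (fe_values[velocities].symmetric_gradient(i, q) *
                        fe_values[velocities].symmetric_gradient(j, q)) -
                   fe_values[velocities].divergence(i, q) *
                     fe_values[pressure].value(j, q) -
                   fe_values[pressure].value(i, q) *
                     fe_values[velocities].divergence(j, q)) *
                  fe_values.JxW(q);
              }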

      Boundary conditions

      Note
      The material presented here is also discussed in video lecture 21.5. (All video lectures are also available here.) (See also video lecture 21.55, video lecture 21.6, video lecture 21.65.)

      The weak form just derived immediately presents us with different possibilities for imposing boundary conditions:

      1. Dirichlet velocity boundary conditions: On a part $\Gamma_D\subset\partial\Omega$ we may impose Dirichlet conditions on the velocity $\textbf u$:

        \begin{eqnarray*}
        \textbf u = \textbf g_D \qquad\qquad \textrm{on}\ \Gamma_D.
    \end{eqnarray*}

        Because test functions $\textbf{v}$ come from the tangent space of the solution variable, we have that $\textbf{v}=0$ on $\Gamma_D$ and consequently that

        \begin{eqnarray*}
       -(\textbf{n} \otimes \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\Gamma_D}
       +
       (\textbf{n}\cdot\textbf{v}, p)_{\Gamma_D}
       = 0.
    \end{eqnarray*}

        In other words, as usual, strongly imposed boundary values do not appear in the weak form.

        It is noteworthy that if we impose Dirichlet boundary values on the entire boundary, then the pressure is only determined up to a constant. An algorithmic realization of that would use similar tools as have been seen in step-11. (A sketch of how such velocity-only Dirichlet values are typically imposed follows after this list.)

      2. Neumann-type or natural boundary conditions: On the rest of the boundary $\Gamma_N=\partial\Omega\backslash\Gamma_D$, let us re-write the boundary terms as follows:

        \begin{eqnarray*}
       -(\textbf{n} \otimes \textbf{v}, 2\; \varepsilon(\textbf{u}))_{\Gamma_N}
       +
       (\textbf{n}\cdot\textbf{v}, p)_{\Gamma_N}
       &=&
       \cdots
       \\
       &=&
       (\textbf{v},
        \textbf{n}\cdot [p \textbf{I} - 2\; \varepsilon(\textbf{u})])_{\Gamma_N}.
    \end{eqnarray*}
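
      As announced in the first item above, here is a hedged sketch of imposing Dirichlet values on only the velocity components (the pressure receives no Dirichlet data) in deal.II; the boundary id 0 and the function class BoundaryValues<dim> are illustrative assumptions, not taken verbatim from the program:

        const FEValuesExtractors::Vector velocities(0);
        VectorTools::interpolate_boundary_values(dof_handler,
                                                 0, // boundary id (assumed)
                                                 BoundaryValues<dim>(),
                                                 constraints,
                                                 fe.component_mask(velocities));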
/usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html	2024-11-15 06:44:28.811666056 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_23.html	2024-11-15 06:44:28.811666056 +0000

        Introduction

        Note
        The material presented here is also discussed in video lecture 28. (All video lectures are also available here.)

        This is the first of a number of tutorial programs that will finally cover "real" time-dependent problems, not the slightly odd form of time dependence found in step-18 or the DAE model of step-21. In particular, this program introduces the wave equation in a bounded domain. Later, step-24 will consider an example of absorbing boundary conditions, and step-25 a kind of nonlinear wave equation producing solutions called solitons.

        The wave equation in its prototypical form reads as follows: find $u(x,t), x\in\Omega, t\in[0,T]$ that satisfies

        \begin{eqnarray*}
	\frac{\partial^2 u}{\partial t^2}
	-
	\Delta u &=& f
	\qquad
	\textrm{in}\ \Omega\times [0,T],
	\\
	u(x,t) &=& g
	\qquad
	\textrm{on}\ \partial\Omega\times [0,T],
	\\
	u(x,0) &=& u_0(x)
	\qquad
	\textrm{in}\ \Omega,
	\\
	\frac{\partial u(x,0)}{\partial t} &=& u_1(x)
	\qquad
	\textrm{in}\ \Omega.
\end{eqnarray*}

        Note that since this is an equation with second-order time derivatives, we need to pose two initial conditions, one for the value and one for the time derivative of the solution.

        Physically, the equation describes the motion of an elastic medium. In 2-d, one can think of how a membrane moves if subjected to a force. The Dirichlet boundary conditions above indicate that the membrane is clamped at the boundary at a height $g(x,t)$ (this height might be moving as well — think of people holding a blanket and shaking it up and down). The first initial condition equals the initial deflection of the membrane, whereas the second one gives its velocity. For example, one could think of pushing the membrane down with a finger and then letting it go at $t=0$ (nonzero deflection but zero initial velocity), or hitting it with a hammer at $t=0$ (zero deflection but nonzero velocity). Both cases would induce motion in the membrane.

        Time discretization

        Method of lines or Rothe's method?

        There is a long-standing debate in the numerical analysis community over whether a discretization of time dependent equations should involve first discretizing the time variable leading to a stationary PDE at each time step that is then solved using standard finite element techniques (this is called the Rothe method), or whether one should first discretize the spatial variables, leading to a large system of ordinary differential equations that can then be handled by one of the usual ODE solvers (this is called the method of lines).


        Rothe's method!

        Given these considerations, here is how we will proceed: let us first define a simple time stepping method for this second order problem, and then in a second step do the spatial discretization, i.e. we will follow Rothe's approach.

        For the first step, let us take a little detour first: in order to discretize a second time derivative, we can either discretize it directly, or we can introduce an additional variable and transform the system into a first order system. In many cases, this turns out to be equivalent, but dealing with first order systems is often simpler. To this end, let us introduce

        \[
	v = \frac{\partial u}{\partial t},
\]

        and call this variable the velocity for obvious reasons. We can then reformulate the original wave equation as follows:

        \begin{eqnarray*}
	\frac{\partial u}{\partial t}
	-
	v
	&=& 0
	\qquad
	\textrm{in}\ \Omega\times [0,T],
	\\
	\frac{\partial v}{\partial t}
	-
	\Delta u &=& f
	\qquad
	\textrm{in}\ \Omega\times [0,T],
	\\
	u(x,t) &=& g
	\qquad
	\textrm{on}\ \partial\Omega\times [0,T],
	\\
	u(x,0) &=& u_0(x)
	\qquad
	\textrm{in}\ \Omega,
	\\
	v(x,0) &=& u_1(x)
	\qquad
	\textrm{in}\ \Omega.
\end{eqnarray*}

        The advantage of this formulation is that it now only contains first time derivatives for both variables, for which it is simple to write down time stepping schemes. Note that we do not have boundary conditions for $v$ at first. However, we could enforce $v=\frac{\partial
g}{\partial t}$ on the boundary. It turns out in numerical examples that this is actually necessary: without doing so the solution doesn't look particularly wrong, but the Crank-Nicolson scheme does not conserve energy if one doesn't enforce these boundary conditions.

        With this formulation, let us introduce the following time discretization where a superscript $n$ indicates the number of a time step and $k=t_n-t_{n-1}$ is the length of the present time step:

        \begin{eqnarray*}
   \frac{u^n - u^{n-1}}{k}
   - \left[\theta v^n + (1-\theta) v^{n-1}\right] &=& 0,
   \\
   \frac{v^n - v^{n-1}}{k}
   - \Delta\left[\theta u^n + (1-\theta) u^{n-1}\right]
   &=& \theta f^n + (1-\theta) f^{n-1}.
\end{eqnarray*}

        Note how we introduced a parameter $\theta$ here. If we chose $\theta=0$, for example, the first equation would reduce to $\frac{u^n - u^{n-1}}{k}  - v^{n-1} = 0$, which is well-known as the forward or explicit Euler method. On the other hand, if we set $\theta=1$, then we would get $\frac{u^n - u^{n-1}}{k}  - v^n = 0$, which corresponds to the backward or implicit Euler method. Both these methods are first order accurate methods. They are simple to implement, but they are not really very accurate.

        The third case would be to choose $\theta=\frac 12$. The first of the equations above would then read $\frac{u^n - u^{n-1}}{k}
- \frac 12 \left[v^n + v^{n-1}\right] = 0$. This method is known as the Crank-Nicolson method and has the advantage that it is second order accurate. In addition, it has the nice property that it preserves the energy in the solution (physically, the energy is the sum of the kinetic energy of the particles in the membrane plus the potential energy present due to the fact that it is locally stretched; this quantity is a conserved one in the continuous equation, but most time stepping schemes do not conserve it after time discretization). Since $v^n$ also appears in the equation for $u^n$, the Crank-Nicolson scheme is also implicit.

        In the program, we will leave $\theta$ as a parameter, so that it will be easy to play with it. The results section will show some numerical evidence comparing the different schemes.

        The equations above (called the semidiscretized equations because we have only discretized the time, but not space) can be simplified a bit by eliminating $v^n$ from the first equation and rearranging terms. We then get

        \begin{eqnarray*}
   \left[ 1-k^2\theta^2\Delta \right] u^n &=&
          \left[ 1+k^2\theta(1-\theta)\Delta\right] u^{n-1} + k v^{n-1}
          + k^2\theta\left[\theta f^n + (1-\theta) f^{n-1}\right],\\
    v^n &=& v^{n-1} + k\Delta\left[ \theta u^n + (1-\theta) u^{n-1}\right]
    + k\left[\theta f^n + (1-\theta) f^{n-1}\right].
\end{eqnarray*}

        In this form, we see that if we are given the solution $u^{n-1},v^{n-1}$ of the previous timestep, we can then solve for the variables $u^n,v^n$ separately, i.e. one at a time. This is convenient. In addition, we recognize that the operator in the first equation is positive definite, and the second equation looks particularly simple.
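
        In code, this sequential structure might look as follows once space has been discretized (the matrices are introduced in the next section; the names here are illustrative, assuming the same mesh on all time steps):

          // Operator of the first equation: M + k^2 theta^2 A; it is
          // symmetric positive definite, so CG is applicable.
          system_matrix.copy_from(mass_matrix);
          system_matrix.add(time_step * time_step * theta * theta,
                            laplace_matrix);
          // ... build the right-hand side from u^{n-1}, v^{n-1} and f,
          // then solve first for u^n and afterwards for v^n.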

        Space discretization

        We have now derived equations that relate the approximate (semi-discrete) solution $u^n(x)$ and its time derivative $v^n(x)$ at time $t_n$ with the solutions $u^{n-1}(x),v^{n-1}(x)$ of the previous time step at $t_{n-1}$. The next step is to also discretize the spatial variable using the usual finite element methodology. To this end, we multiply each equation with a test function, integrate over the entire domain, and integrate by parts where necessary. This leads to

        \begin{eqnarray*}
   (u^n,\varphi) + k^2\theta^2(\nabla u^n,\nabla \varphi) &=&
   (u^{n-1},\varphi) - k^2\theta(1-\theta)(\nabla u^{n-1},\nabla \varphi)
   +
   k(v^{n-1},\varphi)
   + k^2\theta
   \left[
   \theta (f^n,\varphi) + (1-\theta) (f^{n-1},\varphi)
   \right],
   \\
   (v^n,\varphi) &=& (v^{n-1},\varphi)
   -
   k\left[ \theta (\nabla u^n,\nabla\varphi)
   + (1-\theta) (\nabla u^{n-1},\nabla \varphi)\right]
   + k
   \left[
   \theta (f^n,\varphi) + (1-\theta) (f^{n-1},\varphi)
   \right].
\end{eqnarray*}

        It is then customary to approximate $u^n(x) \approx u^n_h(x) = \sum_i
U_i^n\phi_i^n(x)$, where $\phi_i^n(x)$ are the shape functions used for the discretization of the $n$-th time step and $U_i^n$ are the unknown nodal values of the solution. Similarly, $v^n(x) \approx
v^n_h(x) = \sum_i V_i^n\phi_i^n(x)$. Finally, we have the solutions of the previous time step, $u^{n-1}(x) \approx u^{n-1}_h(x) = \sum_i
U_i^{n-1}\phi_i^{n-1}(x)$ and $v^{n-1}(x) \approx v^{n-1}_h(x) = \sum_i
V_i^{n-1}\phi_i^{n-1}(x)$. Note that since the solution of the previous time step has already been computed by the time we get to time step $n$, $U^{n-1},V^{n-1}$ are known. Furthermore, note that the solutions of the previous step may have been computed on a different mesh, so we have to use shape functions $\phi^{n-1}_i(x)$.

        If we plug these expansions into above equations and test with the test functions from the present mesh, we get the following linear system:

        \begin{eqnarray*}
   (M^n + k^2\theta^2 A^n)U^n &=&
   M^{n,n-1}U^{n-1} - k^2\theta(1-\theta) A^{n,n-1}U^{n-1}
   +
   kM^{n,n-1}V^{n-1}
   + k^2\theta
   \left[
   \theta F^n + (1-\theta) F^{n-1}
   \right],
   \\
   M^nV^n &=& M^{n,n-1}V^{n-1}
   -
   k\left[ \theta A^n U^n + (1-\theta) A^{n,n-1} U^{n-1}\right]
   + k
   \left[
   \theta F^n + (1-\theta) F^{n-1}
   \right],
\end{eqnarray*}

        where

        \begin{eqnarray*}
	M^n_{ij} &=& (\phi_i^n, \phi_j^n),
	\\
	A^n_{ij} &=& (\nabla\phi_i^n, \nabla\phi_j^n),
	\\
	M^{n,n-1}_{ij} &=& (\phi_i^n, \phi_j^{n-1}),
	\\
	A^{n,n-1}_{ij} &=& (\nabla\phi_i^n, \nabla\phi_j^{n-1}),
	\\
	F^n_{i} &=& (f^n,\phi_i^n),
	\\
	F^{n-1}_{i} &=& (f^{n-1},\phi_i^n).
\end{eqnarray*}

        If we solve these two equations, we can move the solution one step forward and go on to the next time step.

        It is worth noting that if we choose the same mesh on each time step (as we will in fact do in the program below), then we have the same shape functions on time step $n$ and $n-1$, i.e. $\phi^n_i=\phi_i^{n-1}=\phi_i$. Consequently, we get $M^n=M^{n,n-1}=M$ and $A^n=A^{n,n-1}=A$. On the other hand, if we had used different shape functions, then we would have to compute integrals that contain shape functions defined on two meshes. This is a somewhat messy process that we omit here, but that is treated in some detail in step-28.
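
        Since the matrices then never change, they can be built once and for all. A sketch using deal.II's MatrixCreator namespace, assuming a DoFHandler dof_handler, its element fe, and matrices already reinit()ed with a sparsity pattern (names illustrative):

          MatrixCreator::create_mass_matrix(dof_handler,
                                            QGauss<dim>(fe.degree + 1),
                                            mass_matrix);
          MatrixCreator::create_laplace_matrix(dof_handler,
                                               QGauss<dim>(fe.degree + 1),
                                               laplace_matrix);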

        Under these conditions (i.e. a mesh that doesn't change), one can optimize the solution procedure a bit by basically eliminating the solution of the second linear system. We will discuss this in the introduction of the step-25 program.

        Energy conservation

        One way to compare the quality of a time stepping scheme is to see whether the numerical approximation preserves conservation properties of the continuous equation. For the wave equation, the natural quantity to look at is the energy. By multiplying the wave equation by $u_t$, integrating over $\Omega$, and integrating by parts where necessary, we find that

        \[
	\frac{d}{d t}
	\left[\frac 12 \int_\Omega \left(\frac{\partial u}{\partial
	t}\right)^2 + (\nabla u)^2 \; dx\right]
	=
	\int_\Omega f \frac{\partial u}{\partial t} \; dx
	+
	\int_{\partial\Omega} n\cdot\nabla u
	\frac{\partial g}{\partial t} \; dx.
\]

        By consequence, in absence of body forces and constant boundary values, we get that

        \[
	E(t) = \frac 12 \int_\Omega \left(\frac{\partial u}{\partial
	t}\right)^2 + (\nabla u)^2 \; dx
\]

        is a conserved quantity, i.e. one that doesn't change with time. We will compute this quantity after each time step. It is straightforward to see that if we replace $u$ by its finite element approximation, and $\frac{\partial u}{\partial t}$ by the finite element approximation of the velocity $v$, then

        \[
	E(t_n) = \frac 12 \left<V^n, M^n V^n\right>
	+
	\frac 12 \left<U^n, A^n U^n\right>.
\]

        As we will see in the results section, the Crank-Nicolson scheme does indeed conserve the energy, whereas neither the forward nor the backward Euler scheme does.
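
        Computing this quantity amounts to two matrix-vector products and two dot products. A minimal sketch, assuming vectors solution_u and solution_v and the matrices mass_matrix and laplace_matrix from above (names illustrative):

          Vector<double> tmp(solution_u.size());

          mass_matrix.vmult(tmp, solution_v);
          double energy = 0.5 * (solution_v * tmp); // 1/2 <V, M V>

          laplace_matrix.vmult(tmp, solution_u);
          energy += 0.5 * (solution_u * tmp);       // 1/2 <U, A U>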

        Who are Courant, Friedrichs, and Lewy?

        One of the reasons why the wave equation is not easy to solve numerically is that explicit time discretizations are only stable if the time step is small enough. In particular, it is coupled to the spatial mesh width $h$. For the lowest order discretization we use here, the relationship reads

        \[
	k\le \frac hc
\]

        where $c$ is the wave speed, which in our formulation of the wave equation has been normalized to one. Consequently, unless we use the implicit schemes with $\theta>0$, our solutions will not be numerically stable if we violate this restriction. Implicit schemes do not have this restriction for stability, but they become inaccurate if the time step is too large.

        This condition was first recognized by Courant, Friedrichs, and Lewy — in 1928, long before computers became available for numerical computations! (This result appeared in the German language article R. Courant, K. Friedrichs and H. Lewy: Über die partiellen Differenzengleichungen der mathematischen Physik, Mathematische Annalen, vol. 100, no. 1, pages 32-74, 1928.) This condition on the time step is most frequently just referred to as the CFL condition. Intuitively, the CFL condition says that the time step must not be larger than the time it takes a wave to cross a single cell.

        In the program, we will refine the square $[-1,1]^2$ seven times uniformly, giving a mesh size of $h=\frac 1{64}$, which is what we set the time step to. The fact that we set the time step and mesh size individually in two different places is error prone: it is too easy to refine the mesh once more but forget to also adjust the time step. step-24 shows a better way how to keep these things in sync.

        The test case

        Although the program has all the hooks to deal with nonzero initial and boundary conditions and body forces, we take a simple case where the domain is a square $[-1,1]^2$ and

        \begin{eqnarray*}
	f &=& 0,
	\\
	u_0 &=& 0,
	\\
	u_1 &=& 0,
	\\
	g &=& \left\{\begin{matrix}\sin (4\pi t)
	&\qquad& \text{for}\ t\le \frac 12, x=-1, -\frac 13<y<\frac 13
	\\
	 0
	&&\text{otherwise}
	\end{matrix}
	\right.
\end{eqnarray*}
/usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html	2024-11-15 06:44:28.855666449 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_24.html	2024-11-15 06:44:28.855666449 +0000
        Here, $\beta$ is a thermoexpansion coefficient.

        Let us now make the assumption that heating only happens on a time scale much shorter than wave propagation through tissue (i.e. the temporal length of the microwave pulse that heats the tissue is much shorter than the time it takes a wave to cross the domain). In that case, the heating rate $H(t,\mathbf r)$ can be written as $H(t,\mathbf r) = a(\mathbf
r)\delta(t)$ (where $a(\mathbf r)$ is a map of absorption strengths for microwave energy and $\delta(t)$ is the Dirac delta function), which together with the first equation above will yield an instantaneous jump in the temperature $T(\mathbf r)$ at time $t=0$. Using this assumption, and taking all equations together, we can rewrite and combine the above as follows:

        \[
 \Delta p-\frac{1}{c_0^2} \frac{\partial^2 p}{\partial t^2} = \lambda
 a(\mathbf r)\frac{d\delta(t)}{dt}
\]

        \begin{eqnarray*}
 \cdots &=&
 (c_0^2k(1-\theta)A-c_0B)\bar{p}^{n-1}-Mv^{n-1}+c_0^2k(\theta F^{n}+(1-\theta)F^{n-1}).
\end{eqnarray*}

        The matrices $M$ and $A$ are here as in step-23, and the boundary mass matrix

        \[
	B_{ij} = \left(\varphi_i,\varphi_j\right)_{\partial\Omega}
\]
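
        One way to build such a matrix is a hand-written loop over boundary faces. The following sketch is an illustration of the formula above, not the program's verbatim code; it assumes a DoFHandler dof_handler, its finite element fe, and a SparseMatrix boundary_matrix with a suitable sparsity pattern:

          QGauss<dim - 1>   face_quadrature(fe.degree + 1);
          FEFaceValues<dim> fe_face_values(fe,
                                           face_quadrature,
                                           update_values | update_JxW_values);
          std::vector<types::global_dof_index> dof_indices(fe.n_dofs_per_cell());

          for (const auto &cell : dof_handler.active_cell_iterators())
            for (unsigned int f = 0; f < GeometryInfo<dim>::faces_per_cell; ++f)
              if (cell->face(f)->at_boundary())
                {
                  fe_face_values.reinit(cell, f);
                  cell->get_dof_indices(dof_indices);
                  // accumulate B_ij += phi_i(x_q) phi_j(x_q) JxW(q) on this face
                  for (unsigned int q = 0; q < face_quadrature.size(); ++q)
                    for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
                      for (unsigned int j = 0; j < fe.n_dofs_per_cell(); ++j)
                        boundary_matrix.add(dof_indices[i],
                                            dof_indices[j],
                                            fe_face_values.shape_value(i, q) *
                                              fe_face_values.shape_value(j, q) *
                                              fe_face_values.JxW(q));
                }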

        If we use the property of the delta function that $\int_{-\epsilon}^{\epsilon}
\delta(t)\; dt = 1$, and assume that $P$ is a continuous function in time, we find as we let $\epsilon$ go to zero that

        \[
 - \lim_{\epsilon\rightarrow 0}\frac{1}{c_0^2} \left[ p(\epsilon,\mathbf r) - p(-\epsilon,\mathbf r) \right]
 =
 \cdots
\]

        \[
   \cdots =
   0.
 \]

        Now, let $\epsilon\rightarrow 0$. Assuming that $P$ is a continuous function in time, we see that

        \[
   P(\epsilon)-P(-\epsilon) \rightarrow 0,
 \]
        Here's what's new: first, we need that boundary mass matrix $B$ that came out of the absorbing boundary condition. Likewise, since this time we consider a realistic medium, we must have a measure of the wave speed $c_0$ that will enter all the formulas with the Laplace matrix (which we still define as $(\nabla \phi_i,\nabla \phi_j)$):

          SparseMatrix<double> boundary_matrix;
          const double wave_speed;
         

        TATForwardProblem::setup_system

        The following system is pretty much what we've already done in step-23, but with two important differences. First, we have to create a circular (or spherical) mesh around the origin, with a radius of 1. This is nothing new: we've done so before in step-6 and step-10, where we also explain how the PolarManifold or SphericalManifold object places new points on concentric circles when a cell is refined, which we will use here as well.

        One thing we had to make sure is that the time step satisfies the CFL condition discussed in the introduction of step-23. Back in that program, we ensured this by hand by setting a timestep that matches the mesh width, but that was error prone because if we refined the mesh once more we would also have to make sure the time step is changed. Here, we do that automatically: we ask a library function for the minimal diameter of any cell. Then we set $k=\frac h{c_0}$. The only problem is: what exactly is $h$? The point is that there is really no good theory on this question for the wave equation. It is known that for uniformly refined meshes consisting of rectangles, $h$ is the minimal edge length. But for meshes on general quadrilaterals, the exact relationship appears to be unknown, i.e. it is unknown what properties of cells are relevant for the CFL condition. The problem is that the CFL condition follows from knowledge of the smallest eigenvalue of the Laplace matrix, and that can only be computed analytically for simply structured meshes.

        The upshot of all this is that we're not quite sure what exactly we should take for $h$. The function GridTools::minimal_cell_diameter computes the minimal diameter of all cells. If the cells were all squares or cubes, then the minimal edge length would be the minimal diameter divided by std::sqrt(dim). We simply generalize this, without theoretical justification, to the case of non-uniform meshes.
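
        In code, the resulting choice condenses to two lines; a sketch assuming a Triangulation object triangulation and the wave_speed member shown above:

          const double h_min = GridTools::minimal_cell_diameter(triangulation) /
                               std::sqrt(1. * dim);
          time_step = h_min / wave_speed; // k = h / c_0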

        The only other significant change is that we need to build the boundary mass matrix. We will comment on this further down below.

          template <int dim>
          void TATForwardProblem<dim>::setup_system()

        TATForwardProblem::solve_p and TATForwardProblem::solve_v

        The following two functions, solving the linear systems for the pressure and the velocity variable, are taken pretty much verbatim (with the exception of the change of name from $u$ to $p$ of the primary variable) from step-23:

          template <int dim>
          void TATForwardProblem<dim>::solve_p()
          {

        TATForwardProblem::run

        This function that does most of the work is pretty much again like in step-23, though we make things a bit clearer by using the vectors G1 and G2 mentioned in the introduction. Compared to the overall memory consumption of the program, the introduction of a few temporary vectors isn't doing much harm.

        The only changes to this function are: first, that we do not have to project initial values for the velocity $v$, since we know that it is zero. And second that we evaluate the solution at the detector locations computed in the constructor. This is done using the VectorTools::point_value function. These values are then written to a file that we open at the beginning of the function.
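
        A sketch of that detector evaluation, assuming a container detector_locations of Point<dim> objects and an output stream detector_data as described (names illustrative):

          for (const Point<dim> &location : detector_locations)
            detector_data << ' '
                          << VectorTools::point_value(dof_handler,
                                                      solution_p,
                                                      location);
          detector_data << std::endl;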

          template <int dim>
          void TATForwardProblem<dim>::run()
          {
          return 0;
          }

        Results

        The program writes both graphical data for each time step as well as the values evaluated at each detector location to disk. We then draw them in plots. Experimental data were also collected for comparison. Currently our experiments have only been done in two dimensions by circularly scanning a single detector. The tissue sample here is a thin slice in the $X-Y$ plane ( $Z=0$), and we assume that signals from other $Z$ directions won't contribute to the data. Consequently, we only have to compare our experimental data with two dimensional simulated data.

        One absorber

        This movie shows the thermoacoustic waves generated by a single small absorber propagating in the medium (in our simulation, we assume the medium is mineral oil, which has an acoustic speed of 1.437 $\frac{mm}{\mu s}$):

/usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html	2024-11-15 06:44:28.907666914 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_25.html	2024-11-15 06:44:28.907666914 +0000

        Discretization of the equations in time

        Now, we can discretize the split formulation in time using the $\theta$-method, which has a stencil of only two time steps. By choosing a $\theta\in [0,1]$, the latter discretization allows us to choose from a continuum of schemes. In particular, if we pick $\theta=0$ or $\theta=1$, we obtain the first-order accurate explicit or implicit Euler method, respectively. Another important choice is $\theta=\frac{1}{2}$, which gives the second-order accurate Crank-Nicolson scheme. Henceforth, a superscript $n$ denotes the values of the variables at the $n^{\mathrm{th}}$ time step, i.e. at $t=t_n \dealcoloneq n k$, where $k$ is the (fixed) time step size. Thus, the split formulation of the time-discretized sine-Gordon equation becomes

        \begin{eqnarray*}
   \frac{u^n - u^{n-1}}{k} - \left[\theta v^n + (1-\theta) v^{n-1}\right] &=& 0,\\
   \frac{v^n - v^{n-1}}{k} - \Delta\left[\theta u^n + (1-\theta) u^{n-1}\right]
   &=& -\sin\left[\theta u^n + (1-\theta) u^{n-1}\right].
 \end{eqnarray*}

        We can simplify the latter via a bit of algebra. Eliminating $v^n$ from the first equation and rearranging, we obtain

        \begin{eqnarray*}
   \left[ 1-k^2\theta^2\Delta \right] u^n &=&
          \left[ 1+k^2\theta(1-\theta)\Delta\right] u^{n-1} + k v^{n-1}
          - k^2\theta\sin\left[ \theta u^n + (1-\theta) u^{n-1} \right],\\
   v^n &=& v^{n-1} + k\Delta\left[ \theta u^n + (1-\theta) u^{n-1}\right]
          - k\sin\left[ \theta u^n + (1-\theta) u^{n-1} \right].
 \end{eqnarray*}

        It may seem as though we can just proceed to discretize the equations in space at this point. While this is true for the second equation (which is linear in $v^n$), this would not work for all $\theta$ since the first equation above is nonlinear. Therefore, a nonlinear solver must be implemented, then the equations can be discretized in space and solved.

        To this end, we can use Newton's method. Given the nonlinear equation $F(u^n) = 0$, we produce successive approximations to $u^n$ as follows:

        \begin{eqnarray*}
   \mbox{ Find } \delta u^n_l \mbox{ s.t. } F'(u^n_l)\delta u^n_l = -F(u^n_l)
   \mbox{, set }  u^n_{l+1} = u^n_l + \delta u^n_l.
 \end{eqnarray*}
        Notice that while $F(u^n_l)$ is a function, $F'(u^n_l)$ is an operator.
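
        In code, the iteration is a short loop. The helper names assemble_residual(), assemble_jacobian() and solve() below are hypothetical placeholders for the pieces discussed in this introduction, not the program's actual functions:

          for (unsigned int l = 0; l < max_newton_iterations; ++l)
            {
              assemble_residual(); // builds -F(u^n_l) into system_rhs
              if (system_rhs.l2_norm() < tolerance)
                break; // converged
              assemble_jacobian(); // builds F'(u^n_l) into system_matrix
              solve();             // solves F'(u^n_l) du = -F(u^n_l)
              solution += solution_update; // u^n_{l+1} = u^n_l + du
            }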

        Weak formulation of the time-discretized equations

        With hindsight, we choose both the solution and the test space to be $H^1(\Omega)$. Hence, multiplying by a test function $\varphi$ and integrating, we obtain the following variational (or weak) formulation of the split formulation (including the nonlinear solver for the first equation) at each time step:

        \begin{eqnarray*}
   &\mbox{ Find}& \delta u^n_l \in H^1(\Omega) \mbox{ s.t. }
   \left( F'(u^n_l)\delta u^n_l, \varphi \right)_{\Omega}
   = -\left(F(u^n_l), \varphi \right)_{\Omega}
   \;\forall\varphi\in H^1(\Omega),
   \mbox{ set } u^n_{l+1} = u^n_l + \delta u^n_l,\\
   &\mbox{ Find}& v^n \in H^1(\Omega) \mbox{ s.t. }
   \left( v^n, \varphi \right)_{\Omega}
   =
   \left( v^{n-1}, \varphi \right)_{\Omega}
   - k\theta\left( \nabla u^n, \nabla\varphi \right)_{\Omega}
   - k (1-\theta)\left( \nabla u^{n-1}, \nabla\varphi \right)_{\Omega}
   - k\left(\sin\left[ \theta u^n + (1-\theta) u^{n-1} \right],
          \varphi \right)_{\Omega} \;\forall\varphi\in H^1(\Omega).
 \end{eqnarray*}

        Note that we have used integration by parts and the zero Neumann boundary conditions on all terms involving the Laplacian operator. Moreover, $F(\cdot)$ and $F'(\cdot)$ are as defined above, and $(\cdot,\cdot)_{\Omega}$ denotes the usual $L^2$ inner product over the domain $\Omega$, i.e. $(f,g)_{\Omega} = \int_\Omega fg
\,\mathrm{d}x$. Finally, notice that the first equation is, in fact, the definition of an iterative procedure, so it is solved multiple times during each time step until a stopping criterion is met.

        Discretization of the weak formulation in space

        Using the Finite Element Method, we discretize the variational formulation in space. To this end, let $V_h$ be a finite-dimensional $H^1(\Omega)$-conforming finite element space ( $\mathrm{dim}\, V_h = N
< \infty$) with nodal basis $\{\varphi_1,\ldots,\varphi_N\}$. Now, we can expand all functions in the weak formulation (see above) in terms of the nodal basis. Henceforth, we shall denote by a capital letter the vector of coefficients (in the nodal basis) of a function denoted by the same letter in lower case; e.g., $u^n = \sum_{i=1}^N
U^n_i \varphi_i$ where $U^n \in {R}^N$ and $u^n \in
H^1(\Omega)$. Thus, the finite-dimensional version of the variational formulation requires that we solve the following matrix equations at each time step:

        \begin{eqnarray*}
   F_h'(U^n_l) &=& M + k^2\theta^2 A
   + k^2\theta^2N(u^n_l,u^{n-1})
 \end{eqnarray*}

        Again, note that the first matrix equation above is, in fact, the definition of an iterative procedure, so it is solved multiple times until a stopping criterion is met. Moreover, $M$ is the mass matrix, i.e. $M_{ij} = \left( \varphi_i,\varphi_j \right)_{\Omega}$, $A$ is the Laplace matrix, i.e. $A_{ij} = \left( \nabla \varphi_i, \nabla
\varphi_j \right)_{\Omega}$, $S$ is the nonlinear term in the equation that defines our auxiliary velocity variable, i.e. $S_j(f,g) = \left(
  \sin\left[ \theta f + (1-\theta) g\right], \varphi_j \right)_{\Omega}$, and $N$ is the nonlinear term in the Jacobian matrix of $F(\cdot)$, i.e. $N_{ij}(f,g) = \left( \cos\left[ \theta f + (1-\theta) g\right]\varphi_i,
  \varphi_j \right)_{\Omega}$.

        What solvers can we use for the first equation? Let's look at the matrix we have to invert:

        \[
   (M+k^2\theta^2(A + N))_{ij} =
   \int_\Omega (1+k^2\theta^2 \cos \alpha)
   \varphi_i\varphi_j \; dx
   + k^2 \theta^2 \int_\Omega \nabla\varphi_i\nabla\varphi_j \; dx,
 \]

        for some $\alpha$ that depends on the present and previous solution. First, note that the matrix is symmetric. In addition, if the time step $k$ is small enough, i.e. if $k\theta<1$, then the matrix is also going to be positive definite. In the program below, this will always be the case, so we will use the Conjugate Gradient method together with the SSOR method as preconditioner. We should keep in mind, however, that this will fail if we happen to use a bigger time step. Fortunately, in that case the solver will just throw an exception indicating a failure to converge, rather than silently producing a wrong result. If that happens, then we can simply replace the CG method by something that can handle indefinite symmetric systems. The GMRES solver is typically the standard method for all "bad" linear systems, but it is also a slow one. Possibly better would be a solver that utilizes the symmetry, such as, for example, SymmLQ, which is also implemented in deal.II.

        This program uses a clever optimization over step-23 and step-24: If you read the above formulas closely, it becomes clear that the velocity $V$ only ever appears in products with the mass matrix. In step-23 and step-24, we were, therefore, a bit wasteful: in each time step, we would solve a linear system with the mass matrix, only to multiply the solution of that system by $M$ again in the next time step. This can, of course, be avoided, and we do so in this program.
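
        The CG/SSOR combination discussed above then looks like this in deal.II; the tolerance and relaxation parameter are illustrative assumptions, not values taken from the program:

          SolverControl            solver_control(1000,
                                                  1e-12 * system_rhs.l2_norm());
          SolverCG<Vector<double>> cg(solver_control);

          PreconditionSSOR<SparseMatrix<double>> preconditioner;
          preconditioner.initialize(system_matrix, 1.2); // SSOR relaxation

          cg.solve(system_matrix, solution_update, system_rhs, preconditioner);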

        The test case

        There are a few analytical solutions for the sine-Gordon equation, both in 1D and 2D. In particular, the program as is computes the solution to a problem with a single kink-like solitary wave initial condition. This solution is given by Leibbrandt in Phys. Rev. Lett. 41(7), and is implemented in the ExactSolution class.

        It should be noted that this closed-form solution, strictly speaking, only holds for the infinite-space initial-value problem (not the Neumann initial-boundary-value problem under consideration here). However, given that we impose zero Neumann boundary conditions, we expect that the solution to our initial-boundary-value problem would be close to the solution of the infinite-space initial-value problem, if reflections of waves off the boundaries of our domain do not occur. In practice, this is of course not the case, but we can at least proceed as if this were so.

        The constants $\vartheta$ and $\lambda$ in the 2D solution and $\vartheta$, $\phi$ and $\tau$ in the 3D solution are called the Bäcklund transformation parameters. They control such things as the orientation and steepness of the kink. For the purposes of testing the code against the exact solution, one should choose the parameters so that the kink is aligned with the grid.

        The solutions that we implement in the ExactSolution class are these:

        • In 1D:

          \[
             u(x,t) = -4 \arctan\left(\frac{m}{\sqrt{1-m^2}}
             \frac{\sin\left(\sqrt{1-m^2}\,t+c_2\right)}{\cosh(mx+c_1)}\right),
          \]

          where $c_1$, $c_2$ and $m<1$ are constants.

        • In 2D:

          \[
             u(x,y,t) = 4 \arctan \left[a_0 e^{s\xi}\right],
          \]

          where $\xi$ is defined as

          \[
             \xi = x \cos\vartheta + \sin(\vartheta) (y\cosh\lambda + t\sinh \lambda),
          \]

        • In 3D:

          \[
             u(x,y,z,t) = 4 \arctan \left[c_0 e^{s\xi}\right],
          \]

          where $\xi$ is defined as

          \[
             \xi = x \cos\vartheta + y \sin \vartheta \cos\phi +
                   \sin \vartheta \sin\phi (z\cosh\tau + t\sinh \tau),
          \]
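Such closed-form solutions are implemented as subclasses of Function<dim>. As a minimal sketch (not the tutorial's actual ExactSolution class; it needs <cmath> and deal.II's function.h), the 1D breather above with $c_1=c_2=0$ and $m=0.5$ could be written as:

  template <int dim>
  class BreatherSolution : public Function<dim>
  {
  public:
    BreatherSolution(const double time = 0.)
      : Function<dim>(1, time)
    {}

    virtual double value(const Point<dim> &p,
                         const unsigned int /*component*/ = 0) const override
    {
      const double t = this->get_time();
      const double m = 0.5;
      // u(x,t) = -4 arctan( m/sqrt(1-m^2) * sin(sqrt(1-m^2) t) / cosh(m x) )
      return -4. * std::atan(m / std::sqrt(1. - m * m) *
                             std::sin(std::sqrt(1. - m * m) * t) /
                             std::cosh(m * p[0]));
    }
  };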
The entire algorithm for solving the problem is encapsulated in this class. As in previous example programs, the class is declared with a template parameter, which is the spatial dimension, so that we can solve the sine-Gordon equation in one, two or three spatial dimensions. For more on the dimension-independent class-encapsulation of the problem, the reader should consult step-3 and step-4.

          Compared to step-23 and step-24, there isn't anything newsworthy in the general structure of the program (though there is of course in the inner workings of the various functions!). The most notable difference is the presence of the two new functions compute_nl_term and compute_nl_matrix that compute the nonlinear contributions to the system matrix and right-hand side of the first equation, as discussed in the Introduction. In addition, we have to have a vector solution_update that contains the nonlinear update to the solution vector in each Newton step.

          As also mentioned in the introduction, we do not store the velocity variable in this program, but the mass matrix times the velocity. This is done in the M_x_velocity variable (the "x" is intended to stand for "times").


          Finally, the output_timestep_skip variable stores the number of time steps to be taken each time before graphical output is to be generated. This is of importance when using fine meshes (and consequently small time steps) where we would run lots of time steps and create lots of output files of solutions that look almost the same in subsequent files. This only clogs up our visualization procedures and we should avoid creating more output than we are really interested in. Therefore, if this variable is set to a value $n$ bigger than one, output is generated only every $n$th time step.

            template <int dim>
            class SineGordonProblem
            {

          Let's move on to the implementation of the main class, as it implements the algorithm outlined in the introduction.

          SineGordonProblem::SineGordonProblem

          This is the constructor of the SineGordonProblem class. It specifies the desired polynomial degree of the finite elements, associates a DoFHandler to the triangulation object (just as in the example programs step-3 and step-4), initializes the current or initial time, the final time, the time step size, and the value of $\theta$ for the time stepping scheme. Since the solutions we compute here are time-periodic, the actual value of the start-time doesn't matter, and we choose it so that we start at an interesting time.


Note that if we were to choose the explicit Euler time stepping scheme ($\theta = 0$), then we must pick a time step $k \le h$, otherwise the scheme is not stable and oscillations might arise in the solution. The Crank-Nicolson scheme ($\theta = \frac{1}{2}$) and the implicit Euler scheme ($\theta=1$) do not suffer from this deficiency, since they are unconditionally stable. However, even then the time step should be chosen to be on the order of $h$ in order to obtain a good solution. Since we know that our mesh results from the uniform subdivision of a rectangle, we can compute that time step easily; if we had a different domain, the technique in step-24 using GridTools::minimal_cell_diameter would work as well.
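As a sketch, that technique amounts to a single call; the divisor 10 is an illustrative assumption chosen to keep $k$ on the order of $h$:

  time_step = GridTools::minimal_cell_diameter(triangulation) / 10.;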

            template <int dim>
            SineGordonProblem<dim>::SineGordonProblem()
            : fe(1)

          SineGordonProblem::make_grid_and_dofs


          This function creates a rectangular grid in dim dimensions and refines it several times. Also, all matrix and vector members of the SineGordonProblem class are initialized to their appropriate sizes once the degrees of freedom have been assembled. Like step-24, we use MatrixCreator functions to generate a mass matrix $M$ and a Laplace matrix $A$ and store them in the appropriate variables for the remainder of the program's life.
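Concretely, the two MatrixCreator calls look like the following sketch, which the function below performs; the quadrature degree is an assumption:

  MatrixCreator::create_mass_matrix(dof_handler,
                                    QGauss<dim>(fe.degree + 1),
                                    mass_matrix);
  MatrixCreator::create_laplace_matrix(dof_handler,
                                       QGauss<dim>(fe.degree + 1),
                                       laplace_matrix);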

            template <int dim>
            void SineGordonProblem<dim>::make_grid_and_dofs()
            {
            << "advancing to t = " << time << '.' << std::endl;
           

          At the beginning of each time step we must solve the nonlinear equation in the split formulation via Newton's method — i.e. solve for $\delta U^{n,l}$ then compute $U^{n,l+1}$ and so on. The stopping criterion for this nonlinear iteration is that $\|F_h(U^{n,l})\|_2 \le 10^{-6} \|F_h(U^{n,0})\|_2$. Consequently, we need to record the norm of the residual in the first iteration.


          At the end of each iteration, we output to the console how many linear solver iterations it took us. When the loop below is done, we have (an approximation of) $U^n$.

            double initial_rhs_norm = 0.;
            bool first_iteration = true;
            do
           
            std::cout << " CG iterations per nonlinear step." << std::endl;
           
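In outline, the complete nonlinear iteration could look like the following sketch; assemble_newton_system() and solve_newton_step() are hypothetical helper names standing in for the tutorial's actual assembly and solve code:

  double initial_rhs_norm = 0.;
  bool   first_iteration  = true;
  double rhs_norm         = 0.;
  do
    {
      assemble_newton_system();        // build F_h(U^{n,l}) and its Jacobian
      rhs_norm = system_rhs.l2_norm(); // norm of the nonlinear residual

      if (first_iteration)
        initial_rhs_norm = rhs_norm;   // record ||F_h(U^{n,0})||_2
      first_iteration = false;

      solve_newton_step();             // CG solve for the update delta U^{n,l}
      solution += solution_update;     // U^{n,l+1} = U^{n,l} + delta U^{n,l}
    }
  while (rhs_norm > 1e-6 * initial_rhs_norm);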

          Upon obtaining the solution to the first equation of the problem at $t=t_n$, we must update the auxiliary velocity variable $V^n$. However, we do not compute and store $V^n$ since it is not a quantity we use directly in the problem. Hence, for simplicity, we update $MV^n$ directly:

  Vector<double> tmp_vector(solution.size());
  laplace_matrix.vmult(tmp_vector, solution);       // tmp_vector = A U^n
  M_x_velocity.add(-time_step * theta, tmp_vector); // MV^n -= k theta A U^n
            return 0;
            }

          Results


The explicit Euler time stepping scheme ($\theta=0$) performs adequately for the problems we wish to solve. Unfortunately, a rather small time step has to be chosen due to stability issues: $k\sim h/10$ appears to work for most of the simulations we performed. On the other hand, the Crank-Nicolson scheme ($\theta=\frac{1}{2}$) is unconditionally stable, and (at least for the case of the 1D breather) we can pick the time step to be as large as $25h$ without any ill effects on the solution. The implicit Euler scheme ($\theta=1$) is "exponentially damped," so it is not a good choice for solving the sine-Gordon equation, which is conservative. However, some of the damped schemes in the continuum offered by the $\theta$-method were useful for eliminating spurious oscillations due to boundary effects.

In the simulations below, we solve the sine-Gordon equation on the interval $\Omega = [-10,10]$ in 1D and on the square $\Omega = [-10,10]\times [-10,10]$ in 2D. In each case, the respective grid is refined uniformly 6 times, i.e. $h\sim 2^{-6}$.

\[
   u_{\mathrm{breather}}(x,t) = -4\arctan \left(\frac{m}{\sqrt{1-m^2}} \frac{\sin\left(\sqrt{1-m^2}t +c_2\right)}{\cosh(mx+c_1)} \right),
\]


where $c_1$, $c_2$ and $m<1$ are constants. In the simulation below, we have chosen $c_1=0$, $c_2=0$, $m=0.5$. Moreover, it is known that the period of oscillation of the breather is $2\pi\sqrt{1-m^2}$, hence we have chosen $t_0=-5.4414$ and $t_f=2.7207$ so that we can observe three oscillations of the solution. Then, taking $u_0(x) = u_{\mathrm{breather}}(x,t_0)$, $\theta=0$ and $k=h/10$, the program computed the following solution.

          Animation of the 1D stationary breather.

Though we do not show how to do this in the program, another way to visualize the (1+1)-d solution is to use output generated by the DataOutStack class; it allows us to "stack" the solutions of individual time steps, so that we get 2D space-time graphs from 1D time-dependent solutions. This produces the space-time plot below instead of the animation above.

          A space-time plot of the 1D stationary breather.


          Furthermore, since the breather is an analytical solution of the sine-Gordon equation, we can use it to validate our code, although we have to assume that the error introduced by our choice of Neumann boundary conditions is small compared to the numerical error. Under this assumption, one could use the VectorTools::integrate_difference function to compute the difference between the numerical solution and the function described by the ExactSolution class of this program. For the simulation shown in the two images above, the $L^2$ norm of the error in the finite element solution at each time step remained on the order of $10^{-2}$. Hence, we can conclude that the numerical method has been implemented correctly in the program.
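A sketch of such a check; the ExactSolution constructor is assumed to take the number of components and the evaluation time:

  Vector<double> error_per_cell(triangulation.n_active_cells());
  VectorTools::integrate_difference(dof_handler,
                                    solution,
                                    ExactSolution<dim>(1, time),
                                    error_per_cell,
                                    QGauss<dim>(fe.degree + 1),
                                    VectorTools::L2_norm);
  // for the L2 norm, the global error is the l2 norm of the cellwise values
  std::cout << "L2 error: " << error_per_cell.l2_norm() << std::endl;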

          A few (2+1)D Solutions

          The only analytical solution to the sine-Gordon equation in (2+1)D that can be found in the literature is the so-called kink solitary wave. It has the following closed-form expression:

\[
   u_{\mathrm{kink}}(x,y,t) = 4 \arctan \left[a_0 e^{s\xi}\right],
   \qquad
   \xi = x \cos\vartheta + \sin(\vartheta)\,(y\cosh\lambda + t\sinh\lambda),
\]

          where $a_0$, $\vartheta$ and $\lambda$ are constants. In the simulation below we have chosen $a_0=\lambda=1$. Notice that if $\vartheta=\pi$ the kink is stationary, hence it would make a good solution against which we can validate the program in 2D because no reflections off the boundary of the domain occur.


The simulation shown below was performed with $u_0(x) = u_{\mathrm{kink}}(x,t_0)$, $\theta=\frac{1}{2}$, $k=20h$, $t_0=1$ and $t_f=500$. The $L^2$ norm of the error of the finite element solution at each time step remained on the order of $10^{-2}$, showing that the program is working correctly in 2D as well as in 1D. Unfortunately, the solution is not very interesting; nonetheless, we have included a snapshot of it below for completeness.

          Stationary 2D kink.

          Now that we have validated the code in 1D and 2D, we move to a problem where the analytical solution is unknown.

To this end, we rotate the kink solution discussed above about the $z$ axis: we let $\vartheta=\frac{\pi}{4}$. The latter results in a solitary wave that is not aligned with the grid, so reflections occur at the boundaries of the domain immediately. For the simulation shown below, we have taken $u_0(x)=u_{\mathrm{kink}}(x,t_0)$, $\theta=\frac{2}{3}$, $k=20h$, $t_0=0$ and $t_f=20$. Moreover, we had to pick $\theta=\frac{2}{3}$ because for any $\theta\le\frac{1}{2}$ oscillations arose at the boundary, which are likely due to the scheme and not the equation; picking a value of $\theta$ a good bit into the "exponentially damped" spectrum of the time stepping schemes ensures that these oscillations are not created.

/usr/share/doc/packages/dealii/doxygen/deal.II/step_26.html differs (HTML document, UTF-8 Unicode text, with very long lines)

          In some sense, this equation is simpler than the ones we have discussed in the preceding programs step-23, step-24, step-25, namely the wave equation. This is due to the fact that the heat equation smoothes out the solution over time, and is consequently more forgiving in many regards. For example, when using implicit time stepping methods, we can actually take large time steps, we have less trouble with the small disturbances we introduce through adapting the mesh every few time steps, etc.


          Our goal here will be to solve the equations above using the theta-scheme that discretizes the equation in time using the following approach, where we would like $u^n(\mathbf x)$ to approximate $u(\mathbf x, t_n)$ at some time $t_n$:

\begin{align*}
   \frac{u^n(\mathbf x)-u^{n-1}(\mathbf x)}{k_n}
   -
   \left[
   (1-\theta)\Delta u^{n-1}(\mathbf x)
   +
   \theta\Delta u^n(\mathbf x)
   \right]
   =
   \left[
   (1-\theta)f(\mathbf x,t_{n-1})
   +
   \theta f(\mathbf x,t_n)
   \right].
\end{align*}


          Here, $k_n=t_n-t_{n-1}$ is the time step size. The theta-scheme generalizes the explicit Euler ( $\theta=0$), implicit Euler ( $\theta=1$) and Crank-Nicolson ( $\theta=\frac 12$) time discretizations. Since the latter has the highest convergence order, we will choose $\theta=\frac 12$ in the program below, but make it so that playing with this parameter remains simple. (If you are interested in playing with higher order methods, take a look at step-52.)


          Given this time discretization, space discretization happens as it always does, by multiplying with test functions, integrating by parts, and then restricting everything to a finite dimensional subspace. This yields the following set of fully discrete equations after multiplying through with $k_n$:

\begin{align*}
   M U^n-MU^{n-1}
   +
   k_n
   \left[
   (1-\theta)A U^{n-1}
   +
   \theta A U^n
   \right]
   =
   k_n
   \left[
   (1-\theta)F^{n-1}
   +
   \theta F^n
   \right],
\end{align*}


          where $M$ is the mass matrix and $A$ is the stiffness matrix that results from discretizing the Laplacian. Bringing all known quantities to the right hand side yields the linear system we have to solve in every step:

\begin{align*}
   (M
   +
   k_n \theta A) U^n
   =
   \left[ M - k_n (1-\theta) A \right] U^{n-1}
   +
   k_n
   \left[
   (1-\theta)F^{n-1}
   +
   \theta F^n
   \right].
\end{align*}

• Time step size and minimal mesh size: For stationary problems, the general approach is "make the mesh as fine as it is necessary". For problems with singularities, this often leads to situations where we get many levels of refinement into corners or along interfaces. The very first tutorial to use adaptive meshes, step-6, is a case in point already.


          However, for time dependent problems, we typically need to choose the time step related to the mesh size. For explicit time discretizations, this is obvious, since we need to respect a CFL condition that ties the time step size to the smallest mesh size. For implicit time discretizations, no such hard restriction exists, but in practice we still want to make the time step smaller if we make the mesh size smaller since we typically have error estimates of the form $\|e\| \le {\cal O}(k^p + h^q)$ where $p,q$ are the convergence orders of the time and space discretization, respectively. We can only make the error small if we decrease both terms. Ideally, an estimate like this would suggest to choose $k \propto h^{q/p}$. Because, at least for problems with non-smooth solutions, the error is typically localized in the cells with the smallest mesh size, we have to indeed choose $k \propto h_{\text{min}}^{q/p}$, using the smallest mesh size.

The consequence is that refining the mesh further in one place implies not only the moderate additional effort of increasing the number of degrees of freedom slightly, but also the much larger effort of having to solve the global linear system more often because of the smaller time step.

          In practice, one typically deals with this by acknowledging that we can not make the time step arbitrarily small, and consequently can not make the local mesh size arbitrarily small. Rather, we set a maximal level of refinement and when we flag cells for refinement, we simply do not refine those cells whose children would exceed this maximal level of refinement.

          There is a similar problem in that we will choose a right hand side that will switch on in different parts of the domain at different times. To avoid being caught flat footed with too coarse a mesh in areas where we suddenly need a finer mesh, we will also enforce in our program a minimal mesh refinement level.

\begin{align*}
   u_h^n(\mathbf x) = \sum_j U^n_j \varphi_j^n(\mathbf x),
\end{align*}


          multiply with test functions $\varphi_i(\mathbf x)$ and integrate by parts where necessary. In a process as outlined above, this would yield

\begin{align*}
   \sum_j
   (M
   +
   k_n \theta A)_{ij} U^n_j
   =
   \sum_j
   \left[
   M_{ij} - k_n (1-\theta) A_{ij}
   \right]
   U^{n-1}_j
   +
   k_n
   \left[
   (1-\theta)F^{n-1}_i
   +
   \theta F^n_i
   \right].
\end{align*}


          Now imagine that we have changed the mesh between time steps $n-1$ and $n$. Then the problem is that the basis functions we use for $u_h^n$ and $u^{n-1}$ are different! This pertains to the terms on the right hand side, the first of which we could more clearly write as (the second follows the same pattern)

\begin{align*}
   (\varphi_i^n, u_h^{n-1})
   =
   \sum_j (\varphi_i^n, \varphi_j^{n-1}) U^{n-1}_j,
   \qquad
   i=1\ldots N_n.
\end{align*}


          If the meshes used in these two time steps are the same, then $(\varphi_i^n, \varphi_j^{n-1})$ forms a square mass matrix $M_{ij}$. However, if the meshes are not the same, then in general the matrix is rectangular. Worse, it is difficult to even compute these integrals because if we loop over the cells of the mesh at time step $n$, then we need to evaluate $\varphi_j^{n-1}$ at the quadrature points of these cells, but they do not necessarily correspond to the cells of the mesh at time step $n-1$ and $\varphi_j^{n-1}$ is not defined via these cells; the same of course applies if we wanted to compute the integrals via integration on the cells of mesh $n-1$.

          In any case, what we have to face is a situation where we need to integrate shape functions defined on two different meshes. This can be done, and is in fact demonstrated in step-28, but the process is at best described by the word "awkward".

          In practice, one does not typically want to do this. Rather, we avoid the whole situation by interpolating the solution from the old to the new mesh every time we adapt the mesh. In other words, rather than solving the equations above, we instead solve the problem

\begin{align*}
   \sum_j
   (M
   +
   k_n \theta A)_{ij} U^n_j
   =
   \sum_j
   \left[
   M_{ij} - k_n (1-\theta) A_{ij}
   \right]
   \left(I_h^n u_h^{n-1}\right)_j
   +
   k_n
   \left[
   (1-\theta)F^{n-1}_i
   +
   \theta F^n_i
   \right],
\end{align*}


where $I_h^n$ is the interpolation operator onto the finite element space used in time step $n$. This is not the optimal approach since it introduces an additional error besides time and space discretization, but it is a pragmatic one that makes it feasible to adapt the mesh from one time step to the next.
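deal.II encapsulates exactly this interpolation step in the SolutionTransfer class. The following is a minimal sketch of its use during refinement, following the classic serial interface (exact signatures vary somewhat between deal.II versions; setup_system() is a stand-in for whatever redistributes DoFs and resizes matrices and vectors):

  SolutionTransfer<dim> solution_trans(dof_handler);

  const Vector<double> previous_solution = solution;
  triangulation.prepare_coarsening_and_refinement();
  solution_trans.prepare_for_coarsening_and_refinement(previous_solution);

  triangulation.execute_coarsening_and_refinement();
  setup_system(); // redistribute DoFs, reinitialize matrices and vectors

  solution_trans.interpolate(previous_solution, solution);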

        What could possibly go wrong? Verifying whether the code is correct

        There are a number of things one can typically get wrong when implementing a finite element code. In particular, for time dependent problems, the following are common sources of bugs:

        • The time integration, for example by getting the coefficients in front of the terms involving the current and previous time steps wrong (e.g., mixing up a factor $\theta$ for $1-\theta$).
        • Handling the right hand side, for example forgetting a factor of $k_n$ or $\theta$.
        • Mishandling the boundary values, again for example forgetting a factor of $k_n$ or $\theta$, or forgetting to apply nonzero boundary values not only to the right hand side but also to the system matrix.

        A less common problem is getting the initial conditions wrong because one can typically see that it is wrong by just outputting the first time step. In any case, in order to verify the correctness of the code, it is helpful to have a testing protocol that allows us to verify each of these components separately. This means:

        • Testing the code with nonzero initial conditions but zero right hand side and boundary values and verifying that the time evolution is correct.


          In other words, in every period of length $\tau$, the right hand side first flashes on in domain 1, then off completely, then on in domain 2, then off completely again. This pattern is probably best observed via the little animation of the solution shown in the results section.

If you interpret the heat equation as finding the spatially and temporally variable temperature distribution of a conducting solid, then the test case above corresponds to an L-shaped body where we keep the boundary at zero temperature, and heat alternatingly in two parts of the domain. While heating is in effect, the temperature rises in these places, after which it diffuses and diminishes again. The point of these initial conditions is that they provide us with a solution that has singularities both in time (when sources switch on and off) as well as in space (at the reentrant corner as well as at the edges and corners of the regions where the source acts).

          The commented program

          The program starts with the usual include files, all of which you should have seen before by now:

            system_rhs.add(-(1 - theta) * time_step, tmp);
           

The second piece is to compute the contributions of the source terms. This corresponds to the term $k_n \left[ (1-\theta)F^{n-1} + \theta F^n \right]$. The following code calls VectorTools::create_right_hand_side to compute the vectors $F$, where we set the time of the right hand side (source) function before we evaluate it. The result of this all ends up in the forcing_terms variable:

            RightHandSide<dim> rhs_function;
            rhs_function.set_time(time);
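  // (A sketch of the elided continuation, mirroring the weights in the
  //  formula above; the quadrature degree is an assumption.)
  VectorTools::create_right_hand_side(dof_handler,
                                      QGauss<dim>(fe.degree + 1),
                                      rhs_function,
                                      tmp);
  forcing_terms = tmp;
  forcing_terms *= time_step * theta;

  rhs_function.set_time(time - time_step); // evaluate F^{n-1} at t_{n-1}
  VectorTools::create_right_hand_side(dof_handler,
                                      QGauss<dim>(fe.degree + 1),
                                      rhs_function,
                                      tmp);
  forcing_terms.add(time_step * (1 - theta), tmp);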

          There are two factors at play. First, there are some islands where cells have been refined but that are surrounded by non-refined cells (and there are probably also a few occasional coarsened islands). These are not terrible, as they most of the time do not affect the approximation quality of the mesh, but they also don't help because so many of their additional degrees of freedom are in fact constrained by hanging node constraints. That said, this is easy to fix: the Triangulation class takes an argument to its constructor indicating a level of "mesh smoothing". Passing one of many possible flags, this instructs the triangulation to refine some additional cells, or not to refine some cells, so that the resulting mesh does not have these artifacts.
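A sketch of what passing such a flag looks like; eliminate_unrefined_islands is one of several available flags, and which combination is appropriate depends on the application:

  Triangulation<dim> triangulation(
    Triangulation<dim>::MeshSmoothing(
      Triangulation<dim>::smoothing_on_refinement |
      Triangulation<dim>::eliminate_unrefined_islands));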

          The second problem is more severe: the mesh appears to lag the solution. The underlying reason is that we only adapt the mesh once every fifth time step, and only allow for a single refinement in these cases. Whenever a source switches on, the solution had been very smooth in this area before and the mesh was consequently rather coarse. This implies that the next time step when we refine the mesh, we will get one refinement level more in this area, and five time steps later another level, etc. But this is not enough: first, we should refine immediately when a source switches on (after all, in the current context we at least know what the right hand side is), and we should allow for more than one refinement level. Of course, all of this can be done using deal.II, it just requires a bit of algorithmic thinking in how to make this work!

          Positivity preservation


          To increase the accuracy and resolution of your simulation in time, one typically decreases the time step size $k_n$. If you start playing around with the time step in this particular example, you will notice that the solution becomes partly negative, if $k_n$ is below a certain threshold. This is not what we would expect to happen (in nature).

          To get an idea of this behavior mathematically, let us consider a general, fully discrete problem:

          \begin{align*}
   A u^{n} = B u^{n-1}.
 \end{align*}


          The general form of the $i$th equation then reads:

          \begin{align*}
   a_{ii} u^{n}_i &= b_{ii} u^{n-1}_i +
   \sum\limits_{j \in S_i} \left( b_{ij} u^{n-1}_j - a_{ij} u^{n}_j \right),
 \end{align*}


          where $S_i$ is the set of degrees of freedom that DoF $i$ couples with (i.e., for which either the matrix $A$ or matrix $B$ has a nonzero entry at position $(i,j)$). If all coefficients fulfill the following conditions:

          \begin{align*}
   a_{ii} &> 0, & b_{ii} &\geq 0, & a_{ij} &\leq 0, & b_{ij} &\geq 0,
   &
   \forall j &\in S_i,
 \end{align*}


          all solutions $u^{n}$ keep their sign from the previous ones $u^{n-1}$, and consequently from the initial values $u^0$. See e.g. Kuzmin, Hämäläinen for more information on positivity preservation.


Depending on the PDE to solve and the time integration scheme used, one is able to deduce conditions for the time step $k_n$. For the heat equation with the Crank-Nicolson scheme, Schatz et al. have translated it to the following ones:

\begin{align*}
   (1 - \theta) k a_{ii} &\leq m_{ii},\qquad \forall i,
\end{align*}
/usr/share/doc/packages/dealii/doxygen/deal.II/step_27.html differs (HTML document, UTF-8 Unicode text, with very long lines)

          Introduction

          This tutorial program attempts to show how to use $hp$-finite element methods with deal.II. It solves the Laplace equation and so builds only on the first few tutorial programs, in particular on step-4 for dimension independent programming and step-6 for adaptive mesh refinement.


          The $hp$-finite element method was proposed in the early 1980s by Babuška and Guo as an alternative to either (i) mesh refinement (i.e., decreasing the mesh parameter $h$ in a finite element computation) or (ii) increasing the polynomial degree $p$ used for shape functions. It is based on the observation that increasing the polynomial degree of the shape functions reduces the approximation error if the solution is sufficiently smooth. On the other hand, it is well known that even for the generally well-behaved class of elliptic problems, higher degrees of regularity can not be guaranteed in the vicinity of boundaries, corners, or where coefficients are discontinuous; consequently, the approximation can not be improved in these areas by increasing the polynomial degree $p$ but only by refining the mesh, i.e., by reducing the mesh size $h$. These differing means to reduce the error have led to the notion of $hp$-finite elements, where the approximating finite element spaces are adapted to have a high polynomial degree $p$ wherever the solution is sufficiently smooth, while the mesh width $h$ is reduced at places wherever the solution lacks regularity. It was already realized in the first papers on this method that $hp$-finite elements can be a powerful tool that can guarantee that the error is reduced not only with some negative power of the number of degrees of freedom, but in fact exponentially.

          In order to implement this method, we need several things above and beyond what a usual finite element program needs, and in particular above what we have introduced in the tutorial programs leading up to step-6. In particular, we will have to discuss the following aspects:

          • Instead of using the same finite element on all cells, we now will want a collection of finite element objects, and associate each cell with one of these objects in this collection.


            One of the central pieces of the adaptive finite element method is that we inspect the computed solution (a posteriori) with an indicator that tells us which are the cells where the error is largest, and then refine them. In many of the other tutorial programs, we use the KellyErrorEstimator class to get an indication of the size of the error on a cell, although we also discuss more complicated strategies in some programs, most importantly in step-14.

            In any case, as long as the decision is only "refine this cell" or "do not refine this cell", the actual refinement step is not particularly challenging. However, here we have a code that is capable of hp-refinement, i.e., we suddenly have two choices whenever we detect that the error on a certain cell is too large for our liking: we can refine the cell by splitting it into several smaller ones, or we can increase the polynomial degree of the shape functions used on it. How do we know which is the more promising strategy? Answering this question is the central problem in $hp$-finite element research at the time of this writing.


            In short, the question does not appear to be settled in the literature at this time. There are a number of more or less complicated schemes that address it, but there is nothing like the KellyErrorEstimator that is universally accepted as a good, even if not optimal, indicator of the error. Most proposals use the fact that it is beneficial to increase the polynomial degree whenever the solution is locally smooth whereas it is better to refine the mesh wherever it is rough. However, the questions of how to determine the local smoothness of the solution as well as the decision when a solution is smooth enough to allow for an increase in $p$ are certainly big and important ones.

            In the following, we propose a simple estimator of the local smoothness of a solution. As we will see in the results section, this estimator has flaws, in particular as far as cells with local hanging nodes are concerned. We therefore do not intend to present the following ideas as a complete solution to the problem. Rather, it is intended as an idea to approach it that merits further research and investigation. In other words, we do not intend to enter a sophisticated proposal into the fray about answers to the general question. However, to demonstrate our approach to $hp$-finite elements, we need a simple indicator that does generate some useful information that is able to drive the simple calculations this tutorial program will perform.

            The idea


            Our approach here is simple: for a function $u({\bf x})$ to be in the Sobolev space $H^s(K)$ on a cell $K$, it has to satisfy the condition

\[
   \int_K |\nabla^s u({\bf x})|^2 \; d{\bf x} < \infty.
\]

\[
   \hat u(\hat{\bf x})
   = \sum_{\bf k} \hat U_{\bf k}\,e^{-i {\bf k}\cdot \hat{\bf x}},
\]


            with Fourier vectors ${\bf k}=(k_x,k_y)$ in 2d, ${\bf k}=(k_x,k_y,k_z)$ in 3d, etc, and $k_x,k_y,k_z=0,2\pi,4\pi,\ldots$. The coefficients of expansion $\hat U_{\bf k}$ can be obtained using $L^2$-orthogonality of the exponential basis

\[
   \int_{\hat K} e^{-i {\bf m}\cdot \hat{\bf x}} e^{i {\bf n}\cdot \hat{\bf x}} d\hat{\bf x} = \delta_{\bf m \bf n},
\]

Put differently: the higher regularity $s$ we want, the faster the Fourier coefficients have to go to zero. If you wonder where the additional exponent $\frac{d-1}2$ comes from: we would like to make use of the fact that $\sum_l a_l < \infty$ if the sequence $a_l = {\cal O}(l^{-1-\epsilon})$ for any $\epsilon>0$. The problem is that we here have a summation not only over a single variable, but over all the integer multiples of $2\pi$ that are located inside the $d$-dimensional sphere, because we have vector components $k_x, k_y, \ldots$. In the same way as we prove that the sequence $a_l$ above converges by replacing the sum by an integral over the entire line, we can replace our $d$-dimensional sum by an integral over $d$-dimensional space. Now we have to note that between distance $|{\bf k}|$ and $|{\bf k}|+d|{\bf k}|$, there are, up to a constant, $|{\bf k}|^{d-1}$ modes, in much the same way as we can transform the volume element $dx\;dy$ into $2\pi r\; dr$. Consequently, it is no longer $|{\bf k}|^{2s}|\hat U_{\bf k}|^2$ that has to decay as ${\cal O}(|{\bf k}|^{-1-\epsilon})$, but it is in fact $|{\bf k}|^{2s}|\hat U_{\bf k}|^2 |{\bf k}|^{d-1}$. A comparison of exponents yields the result.

            We can turn this around: Assume we are given a function $\hat u$ of unknown smoothness. Let us compute its Fourier coefficients $\hat U_{\bf k}$ and see how fast they decay. If they decay as

\[
   |\hat U_{\bf k}| = {\cal O}(|{\bf k}|^{-\mu}),
\]

            then consequently the function we had here was in $H^{\mu-d/2}$.

            What we have to do


            So what do we have to do to estimate the local smoothness of $u({\bf x})$ on a cell $K$? Clearly, the first step is to compute the Fourier coefficients of our solution. Fourier series being infinite series, we simplify our task by only computing the first few terms of the series, such that $|{\bf k}|\le 2\pi N$ with a cut-off $N$. Let us parenthetically remark that we want to choose $N$ large enough so that we capture at least the variation of those shape functions that vary the most. On the other hand, we should not choose $N$ too large: clearly, a finite element function, being a polynomial, is in $C^\infty$ on any given cell, so the coefficients will have to decay exponentially at one point; since we want to estimate the smoothness of the function this polynomial approximates, not of the polynomial itself, we need to choose a reasonable cutoff for $N$. Either way, computing this series is not particularly hard: from the definition

\[
   \hat U_{\bf k}
   = \int_{\hat K} e^{i {\bf k}\cdot \hat{\bf x}} \hat u(\hat{\bf x}) d\hat{\bf x}
   = \sum_i \left[ \int_{\hat K} e^{i {\bf k}\cdot \hat{\bf x}} \hat \varphi_i(\hat{\bf x})
   d\hat{\bf x} \right] u_i,
\]


            where $u_i$ is the value of the $i$th degree of freedom on this cell. In other words, we can write it as a matrix-vector product

\[
   \hat U_{\bf k}
   = {\cal F}_{{\bf k},j} u_j,
\]

where

\[
   {\cal F}_{{\bf k},j}
   =
   \int_{\hat K} e^{i {\bf k}\cdot \hat{\bf x}} \hat \varphi_j(\hat{\bf x}) d\hat{\bf x}.
\]


This matrix is easily computed for a given number of shape functions $\varphi_j$ and Fourier modes $N$. Consequently, finding the coefficients $\hat U_{\bf k}$ is a rather trivial job. To simplify our life even further, we will use the FESeries::Fourier class, which does exactly this.

The next task is to estimate how fast these coefficients decay with $|{\bf k}|$. The problem is that, of course, we have only finitely many of these coefficients in the first place. In other words, the best we can do is to fit a function $\alpha |{\bf k}|^{-\mu}$ to our data points $\hat U_{\bf k}$, for example by determining $\alpha,\mu$ via a least-squares procedure:

\[
   \min_{\alpha,\mu}
   \sum_{{\bf k}, |{\bf k}|\le N}
   \left( |\hat U_{\bf k}| - \alpha |{\bf k}|^{-\mu}\right)^2.
\]

Because this problem is nonlinear in $\mu$, we instead fit the logarithms of the coefficients, i.e. we minimize

\[
   \min_{\beta,\mu}
   \sum_{{\bf k}, |{\bf k}|\le N}
   \left( \ln |\hat U_{\bf k}| - \beta + \mu \ln |{\bf k}|\right)^2,
\]

where $\beta=\ln \alpha$. This is now a problem for which the optimality conditions $\frac{\partial Q}{\partial\beta}=0, \frac{\partial Q}{\partial\mu}=0$, are linear in $\beta,\mu$. We can write these conditions as follows:

\[
   \left(\begin{array}{cc}
   \sum_{{\bf k}, |{\bf k}|\le N} 1 &
   -\sum_{{\bf k}, |{\bf k}|\le N} \ln |{\bf k}|
   \\
   \sum_{{\bf k}, |{\bf k}|\le N} \ln |{\bf k}| &
   -\sum_{{\bf k}, |{\bf k}|\le N} (\ln |{\bf k}|)^2
   \end{array}\right)
   \left(\begin{array}{c} \beta \\ \mu \end{array}\right)
   =
   \left(\begin{array}{c}
   \sum_{{\bf k}, |{\bf k}|\le N} \ln |\hat U_{\bf k}| \\
   \sum_{{\bf k}, |{\bf k}|\le N} \ln |\hat U_{\bf k}| \ln |{\bf k}|
   \end{array}\right).
\]

Solving this system for $\mu$ yields

\[
   \mu =
   \frac{
   \left(\sum_{{\bf k}, |{\bf k}|\le N} \ln |{\bf k}|\right)
   \left(\sum_{{\bf k}, |{\bf k}|\le N} \ln |\hat U_{\bf k}|\right)
   -
   \left(\sum_{{\bf k}, |{\bf k}|\le N} 1\right)
   \left(\sum_{{\bf k}, |{\bf k}|\le N} \ln |{\bf k}| \ln |\hat U_{\bf k}|\right)
   }{
   \left(\sum_{{\bf k}, |{\bf k}|\le N} 1\right)
   \left(\sum_{{\bf k}, |{\bf k}|\le N} (\ln |{\bf k}|)^2\right)
   -
   \left(\sum_{{\bf k}, |{\bf k}|\le N} \ln |{\bf k}|\right)^2
   }.
\]


This is nothing else but a linear regression fit, and to do it we will use FESeries::linear_regression(). While we are not particularly interested in the actual value of $\beta$, the formula above gives us a means to calculate the value of the exponent $\mu$ that we can then use to determine that $\hat u(\hat{\bf x})$ is in $H^s(\hat K)$ with $s=\mu-\frac d2$.
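A sketch of the call; the vectors x and y are assumed to already hold the pairs $\ln |{\bf k}|$ and $\ln |\hat U_{\bf k}|$:

  std::vector<double> x; // ln |k|   for each retained Fourier mode
  std::vector<double> y; // ln |U_k| for each retained Fourier mode
  // ... fill x and y from the computed Fourier coefficients ...

  const std::pair<double, double> fit = FESeries::linear_regression(x, y);
  const double mu = -fit.first; // the slope of the fit is -mu; fit.second is beta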

            These steps outlined above are applicable to many different scenarios, which motivated the introduction of a generic function SmoothnessEstimator::Fourier::coefficient_decay() in deal.II, that combines all the tasks described in this section in one simple function call. We will use it in the implementation of this program.
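A sketch of that one-call interface, assuming the hp::FECollection named fe_collection and the DoFHandler used later in this program:

  FESeries::Fourier<dim> fourier =
    SmoothnessEstimator::Fourier::default_fe_series(fe_collection);

  Vector<float> smoothness_indicators(triangulation.n_active_cells());
  SmoothnessEstimator::Fourier::coefficient_decay(fourier,
                                                  dof_handler,
                                                  solution,
                                                  smoothness_indicators);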

            Compensating for anisotropy

In the formulas above, we have derived the Fourier coefficients $\hat U_{\bf k}$. Because ${\bf k}$ is a vector, we will get a number of Fourier coefficients $\hat U_{{\bf k}}$ for the same absolute value $|{\bf k}|$, corresponding to the Fourier transform in different directions. If we now consider a function like $|x|y^2$ then we will find lots of large Fourier coefficients in $x$-direction because the function is non-smooth in this direction, but fast-decaying Fourier coefficients in $y$-direction because the function is smooth there. The question that arises is this: if we simply fit our polynomial decay $\alpha |{\bf k}|^\mu$ to all Fourier coefficients, we will fit it to a smoothness averaged in all spatial directions. Is this what we want? Or would it be better to only consider the largest coefficient $\hat U_{{\bf k}}$ for all ${\bf k}$ with the same magnitude, essentially trying to determine the smoothness of the solution in that spatial direction in which the solution appears to be roughest?

            One can probably argue for either case. The issue would be of more interest if deal.II had the ability to use anisotropic finite elements, i.e., ones that use different polynomial degrees in different spatial directions, as they would be able to exploit the directionally variable smoothness much better. Alas, this capability does not exist at the time of writing this tutorial program.

Either way, because we only have isotropic finite element classes, we adopt the viewpoint that we should tailor the polynomial degree to the lowest amount of regularity, in order to keep numerical efforts low. Consequently, instead of using the formula

\[
   \mu =
   \frac{
   \left(\sum_{{\bf k}, |{\bf k}|\le N} \ln |{\bf k}|\right)
   \left(\sum_{{\bf k}, |{\bf k}|\le N} \ln |\hat U_{\bf k}|\right)
   -
   \left(\sum_{{\bf k}, |{\bf k}|\le N} 1\right)
   \left(\sum_{{\bf k}, |{\bf k}|\le N} \ln |{\bf k}| \ln |\hat U_{\bf k}|\right)
   }{
   \left(\sum_{{\bf k}, |{\bf k}|\le N} 1\right)
   \left(\sum_{{\bf k}, |{\bf k}|\le N} (\ln |{\bf k}|)^2\right)
   -
   \left(\sum_{{\bf k}, |{\bf k}|\le N} \ln |{\bf k}|\right)^2
   }.
\]


To calculate $\mu$ as shown above, we have to slightly modify all sums: instead of summing over all Fourier modes, we only sum over those for which the Fourier coefficient is the largest one among all $\hat U_{{\bf k}}$ with the same magnitude $|{\bf k}|$, i.e., all sums above have to be replaced by the following sums:

\[
   \sum_{{\bf k}, |{\bf k}|\le N}
   \longrightarrow
   \sum_{\substack{{\bf k}, |{\bf k}|\le N \\ |\hat U_{{\bf k}}| \ge |\hat U_{{\bf k}'}| \ \forall\, {\bf k}' : |{\bf k}'| = |{\bf k}|}}
\]

This is the form we will implement in the program.

            Questions about cell sizes


            One may ask whether it is a problem that we only compute the Fourier transform on the reference cell (rather than the real cell) of the solution. After all, we stretch the solution by a factor $\frac 1h$ during the transformation, thereby shifting the Fourier frequencies by a factor of $h$. This is of particular concern since we may have neighboring cells with mesh sizes $h$ that differ by a factor of 2 if one of them is more refined than the other. The concern is also motivated by the fact that, as we will see in the results section below, the estimated smoothness of the solution should be a more or less continuous function, but exhibits jumps at locations where the mesh size jumps. It therefore seems natural to ask whether we have to compensate for the transformation.

            The short answer is "no". In the process outlined above, we attempt to find coefficients $\beta,\mu$ that minimize the sum of squares of the terms

            \[
    \ln |\hat U_{{\bf k}}| - \beta + \mu \ln |{\bf k}|.
 \]

To compensate for the transformation means not attempting to fit a decay $|{\bf k}|^\mu$ with respect to the Fourier frequencies ${\bf k}$ on the unit cell, but to fit the coefficients $\hat U_{{\bf k}}$ computed on the reference cell to the Fourier frequencies on the real cell $|{\bf k}|h$, where $h$ is the norm of the transformation operator (i.e., something like the diameter of the cell). In other words, we would have to minimize the sum of squares of the terms

\[
   \ln |\hat U_{{\bf k}}| - \beta + \mu \ln (|{\bf k}|h).
\]

However, this is equal to

\[
   \ln |\hat U_{{\bf k}}| - (\beta - \mu \ln h) + \mu \ln (|{\bf k}|).
\]


            In other words, this and the original least squares problem will produce the same best-fit exponent $\mu$, though the offset will in one case be $\beta$ and in the other $\beta-\mu \ln h$. However, since we are not interested in the offset at all but only in the exponent, it doesn't matter whether we scale Fourier frequencies in order to account for mesh size effects or not, the estimated smoothness exponent will be the same in either case.

            Complications with linear systems for hp-discretizations

            Creating the sparsity pattern

One of the problems with $hp$-methods is that the high polynomial degree of shape functions together with the large number of constrained degrees of freedom leads to matrices with large numbers of nonzero entries in some rows. At the same time, there are areas where we use low polynomial degrees and where matrix rows therefore have relatively few nonzero entries. Consequently, allocating the sparsity pattern for these matrices is a challenge: we cannot simply assemble a SparsityPattern by starting with an estimate of the bandwidth without using a lot of extra memory.


            The early tutorial programs use first or second degree finite elements, so removing entries in the sparsity pattern corresponding to constrained degrees of freedom does not have a large impact on the overall number of zeros explicitly stored by the matrix. However, since as many as a third of the degrees of freedom may be constrained in an hp-discretization (and, with higher degree elements, these constraints can couple one DoF to as many as ten or twenty other DoFs), it is worthwhile to take these constraints into consideration since the resulting matrix will be much sparser (and, therefore, matrix-vector products or factorizations will be substantially faster too).
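The way around this is to first collect all couplings, honoring the constraints, in an intermediate object, and only then copy them into a static pattern. A minimal sketch using the standard deal.II calls (the variable names are assumptions):

  DynamicSparsityPattern dsp(dof_handler.n_dofs());
  // Passing the AffineConstraints object and 'false' drops the entries
  // that would belong to constrained degrees of freedom.
  DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints,
                                  /*keep_constrained_dofs=*/false);
  sparsity_pattern.copy_from(dsp);
  system_matrix.reinit(sparsity_pattern);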

            Eliminating constrained degrees of freedom

A second problem particular to $hp$-methods arises because we have so many constrained degrees of freedom: typically up to about one third of all degrees of freedom (in 3d) are constrained because they either belong to cells with hanging nodes or because they are on cells adjacent to cells with a higher or lower polynomial degree. This is, in fact, not much more than the fraction of constrained degrees of freedom in non-$hp$-mode, but the difference is that each constrained hanging node is constrained not only against the two adjacent degrees of freedom, but is constrained against many more degrees of freedom.


            It turns out that the strategy presented first in step-6 to eliminate the constraints while computing the element matrices and vectors with AffineConstraints::distribute_local_to_global is the most efficient approach also for this case. The alternative strategy to first build the matrix without constraints and then "condensing" away constrained degrees of freedom is considerably more expensive. It turns out that building the sparsity pattern by this inefficient algorithm requires at least ${\cal O}(N \log N)$ in the number of unknowns, whereas an ideal finite element program would of course only have algorithms that are linear in the number of unknowns. Timing the sparsity pattern creation as well as the matrix assembly shows that the algorithm presented in step-6 (and used in the code below) is indeed faster.

In our program, we will also treat the boundary conditions as (possibly inhomogeneous) constraints and eliminate the matrix rows and columns corresponding to those as well. All we have to do for this is to call the function that interpolates the Dirichlet boundary conditions already in the setup phase in order to tell the AffineConstraints object about them, and then do the transfer from local to global data on matrix and vector simultaneously. This is exactly what we've shown in step-6.
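Schematically, and with names that are assumptions rather than the tutorial's verbatim code, the two phases look like this:

  // Setup phase: hanging-node and Dirichlet boundary constraints are
  // collected in a single AffineConstraints object.
  constraints.clear();
  DoFTools::make_hanging_node_constraints(dof_handler, constraints);
  VectorTools::interpolate_boundary_values(
    dof_handler, 0, Functions::ZeroFunction<dim>(), constraints);
  constraints.close();

  // Assembly phase: constraints (including boundary values) are
  // eliminated while copying local contributions into the global system.
  constraints.distribute_local_to_global(
    cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);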

            The test case

The test case we will solve with this program is a re-take of the one we already looked at in step-14: we solve the Laplace equation



          LaplaceProblem::solve


          The function solving the linear system is entirely unchanged from previous examples. We simply try to reduce the initial residual (which equals the $l_2$ norm of the right hand side) by a certain factor:

            template <int dim>
            void LaplaceProblem<dim>::solve()
            {
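          // The body of this function was elided in the diff above; the
          // following is a sketch in the spirit of the earlier tutorial
          // steps (the exact tolerance and preconditioner settings are
          // assumptions, not necessarily step-27's values):
          SolverControl solver_control(system_rhs.size(),
          1e-12 * system_rhs.l2_norm());
          SolverCG<Vector<double>> cg(solver_control);

          PreconditionSSOR<SparseMatrix<double>> preconditioner;
          preconditioner.initialize(system_matrix, 1.2);

          cg.solve(system_matrix, solution, system_rhs, preconditioner);
          constraints.distribute(solution);
          }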

          LaplaceProblem::postprocess


After solving the linear system, we will want to postprocess the solution. Here, all we do is to estimate the error, estimate the local smoothness of the solution as described in the introduction, then write graphical output, and finally refine the mesh in both $h$ and $p$ according to the indicators computed before. We do all this in the same function because we want to use the estimated error and smoothness indicators not only for refinement, but also to include them in the graphical output.

            template <int dim>
            void LaplaceProblem<dim>::postprocess(const unsigned int cycle)
            {
            }
           

          After this, we would like to actually refine the mesh, in both $h$ and $p$. The way we are going to do this is as follows: first, we use the estimated error to flag those cells for refinement that have the largest error. This is what we have always done:

            GridRefinement::refine_and_coarsen_fixed_number(
            triangulation,
            estimated_error_per_cell,
            0.3, // top fraction of cells; an assumption, elided in this diff
            0.03);
           

          Next we would like to figure out which of the cells that have been flagged for refinement should actually have $p$ increased instead of $h$ decreased. The strategy we choose here is that we look at the smoothness indicators of those cells that are flagged for refinement, and increase $p$ for those with a smoothness larger than a certain relative threshold. In other words, for every cell for which (i) the refinement flag is set, (ii) the smoothness indicator is larger than the threshold, and (iii) we still have a finite element with a polynomial degree higher than the current one in the finite element collection, we will assign a future FE index that corresponds to a polynomial with degree one higher than it currently is. The following function is capable of doing exactly this. Absent any better strategies, we will set the threshold via interpolation between the minimal and maximal smoothness indicators on cells flagged for refinement. Since the corner singularities are strongly localized, we will favor $p$- over $h$-refinement quantitatively. We achieve this with a low threshold by setting a small interpolation factor of 0.2. In the same way, we deal with cells that are going to be coarsened and decrease their polynomial degree when their smoothness indicator is below the corresponding threshold determined on cells to be coarsened.

            hp::Refinement::p_adaptivity_from_relative_threshold(
            dof_handler, smoothness_indicators, 0.2, 0.2);
           

          The above function only determines whether the polynomial degree will change via future FE indices, but does not manipulate the $h$-refinement flags. So for cells that are flagged for both refinement categories, we prefer $p$- over $h$-refinement. The following function call ensures that only one of $p$- or $h$-refinement is imposed, and not both at once.

           
            hp::Refinement::choose_p_over_h(dof_handler);
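Note that none of the calls above changes the mesh by itself; they only set flags and future FE indices. A final call, sketched below under the assumption that the triangulation object carries its usual name, is what actually performs the adaptation:

            // Execute the h-refinement flags and future FE indices set
            // above; only now are cells refined or coarsened and
            // polynomial degrees actually changed.
            triangulation.execute_coarsening_and_refinement();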

          The bigger question is, of course, how to avoid this problem. Possibilities include estimating the smoothness not on single cells, but cell assemblies or patches surrounding each cell. It may also be possible to find simple correction factors for each cell depending on the number of constrained degrees of freedom it has. In either case, there are ample opportunities for further research on finding good $hp$-refinement criteria. On the other hand, the main point of the current program was to demonstrate using the $hp$-technology in deal.II, which is unaffected by our use of a possible sub-optimal refinement criterion.

          Possibilities for extensions

          Different hp-decision strategies


            This tutorial demonstrates only one particular strategy to decide between $h$- and $p$-adaptation. In fact, there are many more ways to automatically decide on the adaptation type, of which a few are already implemented in deal.II:

            • Fourier coefficient decay: This is the strategy currently implemented in this tutorial. For more information on this strategy, see the general documentation of the SmoothnessEstimator::Fourier namespace.



• Refinement history: The last strategy is quite different from the other two. In theory, we know how the error will converge after changing the discretization of the function space. With $h$-refinement the solution converges algebraically as already pointed out in step-7. If the solution is sufficiently smooth, though, we expect that the solution will converge exponentially with increasing polynomial degree of the finite element. We can compare a proper prediction of the error with the actual error in the following step to see if our choice of adaptation type was justified.


The transition to this strategy is a bit more complicated. For this, we need an initialization step with pure $h$- or $p$-refinement and we need to transfer the predicted errors over adapted meshes. The extensive documentation of the hp::Refinement::predict_error() function describes not only the theoretical details of this approach, but also presents a blueprint on how to implement this strategy in your code. For more information, see [melenk2001hp].

              Note that with this particular function you cannot predict the error for the next time step in time-dependent problems. Therefore, this strategy cannot be applied to this type of problem without further ado. Alternatively, the following approach could be used, which works for all the other strategies as well: start each time step with a coarse mesh, keep refining until happy with the result, and only then move on to the next time step.


Try implementing one of these strategies into this tutorial and observe the subtle changes to the results. You will notice that all strategies are capable of identifying the singularities near the reentrant corners and will perform $h$-refinement in these regions, while preferring $p$-refinement in the bulk domain. A detailed comparison of these strategies is presented in [fehling2020].

/usr/share/doc/packages/dealii/doxygen/deal.II/step_28.html differs (HTML document, UTF-8 Unicode text, with very long lines)
          • Absorption $\Sigma_{r,g}(x)\phi_g(x,t)$ (note the negative sign). The coefficient $\Sigma_{r,g}$ is called the removal cross section.
• Nuclear fission $\chi_g\sum_{g'=1}^G\nu\Sigma_{f,g'}(x)\phi_{g'}(x,t)$. The production of neutrons of energy $g$ is proportional to the flux of neutrons of energy $g'$ times the probability $\Sigma_{f,g'}$ that neutrons of energy $g'$ cause a fission event times the number $\nu$ of neutrons produced in each fission event times the probability that a neutron produced in this event has energy $g$. $\nu\Sigma_{f,g'}$ is called the fission cross section and $\chi_g$ the fission spectrum. We will denote the term $\chi_g\nu\Sigma_{f,g'}$ as the fission distribution cross section in the program.
          • Scattering $\sum_{g'\ne g}\Sigma_{s,g'\to g}(x)\phi_{g'}(x,t)$ of neutrons of energy $g'$ producing neutrons of energy $g$. $\Sigma_{s,g'\to g}$ is called the scattering cross section. The case of elastic, in-group scattering $g'=g$ exists, too, but we subsume this into the removal cross section. The case $g'<g$ is called down-scattering, since a neutron loses energy in such an event. On the other hand, $g'>g$ corresponds to up-scattering: a neutron gains energy in a scattering event from the thermal motion of the atoms surrounding it; up-scattering is therefore only an important process for neutrons with kinetic energies that are already on the same order as the thermal kinetic energy (i.e. in the sub $eV$ range).
\begin{eqnarray*}
   \frac 1v \frac{\partial \phi}{\partial t}
   =
   -L\phi + F\phi + X\phi + s_{\mathrm{ext}},
\end{eqnarray*}


            where $L,F,X$ are sinking, fission, and scattering operators, respectively. $L$ here includes both the diffusion and removal terms. Note that $L$ is symmetric, whereas $F$ and $X$ are not.

            It is well known that this equation admits a stable solution if all eigenvalues of the operator $-L+F+X$ are negative. This can be readily seen by multiplying the equation by $\phi$ and integrating over the domain, leading to

            \begin{eqnarray*}
   \frac 1{2v} \frac{\partial}{\partial t}  \|\phi\|^2 = ((-L+F+X)\phi,\phi).
\end{eqnarray*}

\begin{eqnarray*}
   F_i = \int_\Omega f(x) \varphi_g^i(x) \phi_{g'}(x) \ dx,
 \end{eqnarray*}


where $f(x)$ is one of the coefficient functions $\Sigma_{s,g'\to g}$ or $\nu\chi_g\Sigma_{f,g'}$ used in the right hand side of the eigenvalue equation. The difficulty now is that $\phi_{g'}$ is defined on the mesh for energy group $g'$, i.e. it can be expanded as $\phi_{g'}(x)=\sum_j\phi_{g'}^j \varphi_{g'}^j(x)$, with basis functions $\varphi_{g'}^j(x)$ defined on mesh $g'$. The contribution to the right hand side can therefore be written as

            \begin{eqnarray*}
   F_i = \sum_j \left\{\int_\Omega f(x) \varphi_g^i(x) \varphi_{g'}^j(x)
   \ dx \right\} \phi_{g'}^j ,
 \end{eqnarray*}


On the other hand, the test functions $\varphi_g^i(x)$ are defined on mesh $g$. This means that we can't just split the integral over $\Omega$ into integrals over the cells of either mesh $g$ or $g'$, since the basis functions of the respective other mesh may not be defined on these cells.

The solution to this problem lies in the fact that both the meshes for $g$ and $g'$ are derived by adaptive refinement from a common coarse mesh. We can therefore always find a set of cells, which we denote by ${\cal T}_g \cap {\cal T}_{g'}$, that satisfy the following conditions:


              Obviously, the arrangement of assemblies as well as the arrangement of rods inside them affect the distribution of neutron fluxes in the reactor (a fact that will be obvious by looking at the solution shown below in the results sections of this program). Fuel rods, for example, differ from each other in the enrichment of U-235 or Pu-239. Control rods, on the other hand, have zero fission, but nonzero scattering and absorption cross sections.

This whole arrangement would make the description of spatially dependent material parameters very complicated. It will not become much simpler, but we will make one approximation: we merge the volume inhabited by each cylindrical rod and the surrounding water into volumes of quadratic cross section, so-called ‘pin cells’, for which homogenized material data are obtained with a nuclear database and knowledge of the neutron spectrum. The homogenization makes all material data piecewise constant on the solution domain for a reactor with fresh fuel. Spatially dependent material parameters are then looked up for the quadratic assembly in which a point is located, and then for the quadratic pin cell within this assembly.

In this tutorial program, we simulate a quarter of a reactor consisting of $4\times 4$ assemblies. We use symmetry (Neumann) boundary conditions to reduce the problem to one quarter of the domain, and consequently only simulate a $2\times 2$ set of assemblies. Two of them will be UO ${}_2$ fuel, the other two of them MOX fuel. Each of these assemblies consists of $17\times 17$ rods of different compositions. In total, we therefore create a $34\times 34$ lattice of rods. To make things simpler later on, we reflect this fact by creating a coarse mesh of $34\times 34$ cells (even though the domain is a square, for which we would usually use a single cell). In deal.II, each cell has a material_id which one may use to associate each cell with a particular number identifying the material of which this cell's volume is made; we will use this material ID to identify which of the 8 different kinds of rods that are used in this testcase make up a particular cell. Note that upon mesh refinement, the children of a cell inherit the material ID, making it simple to track the material even after mesh refinement.

The arrangement of the rods will be clearly visible in the images shown in the results section. The cross sections for materials and for both energy groups are taken from an OECD/NEA benchmark problem. The detailed configuration and material data are given in the code.

              What the program does (and how it does that)

As a coarse overview of what exactly the program does, here is the basic layout: starting on a coarse mesh that is the same for each energy group, we perform inverse eigenvalue iterations to compute the $k$-eigenvalue on a given set of meshes. We stop these iterations when the change in the eigenvalue drops below a certain tolerance, and then write out the meshes and solutions for each energy group for inspection by a graphics program. Because the meshes for the solutions are different, we have to generate a separate output file for each energy group, rather than being able to add all energy group solutions into the same file.
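In sketch form, the outer loop just described might look like the following (the function names here are placeholders standing in for the program's per-group solves and integrals, not its real interface):

  double k = 1.0, k_old = 0.0;
  while (std::abs(k - k_old) > tolerance)
    {
      k_old = k;
      // One inverse iteration: solve the group systems
      //   L phi_new = (1/k_old) * (F + X) phi_old   (placeholder call).
      solve_all_energy_groups(phi_new, phi_old, k_old);
      // Update the eigenvalue from the ratio of fission sources.
      k = k_old * fission_source(phi_new) / fission_source(phi_old);
      phi_old = phi_new;
    }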

/usr/share/doc/packages/dealii/doxygen/deal.II/step_29.html differs (HTML document, UTF-8 Unicode text, with very long lines)


              For finding suitable conditions on $\Gamma_2$ that model an absorbing boundary, consider a wave of the form $V(x,t)=e^{i(k\cdot x -\omega t)}$ with frequency ${\omega}$ traveling in direction $k\in {\mathrm{R}^2}$. In order for $V$ to solve the wave equation, $|k|={\frac{\omega}{c}}$ must hold. Suppose that this wave hits the boundary in $x_0\in\Gamma_2$ at a right angle, i.e. $n=\frac{k}{|k|}$ with $n$ denoting the outer unit normal of $\Omega$ in $x_0$. Then at $x_0$, this wave satisfies the equation

              \[
 c (n\cdot\nabla V) + \frac{\partial V}{\partial t} = (i\, c\, |k| - i\, \omega) V = 0.
\]


This is a Helmholtz equation (similar to the one in step-7, but this time with "the bad sign") with Dirichlet data on $\Gamma_1$ and mixed boundary conditions on $\Gamma_2$. Because of the condition on $\Gamma_2$, we cannot just treat the equations for real and imaginary parts of $u$ separately. What we can do however is to view the PDE for $u$ as a system of two PDEs for the real and imaginary parts of $u$, with the boundary condition on $\Gamma_2$ representing the coupling terms between the two components of the system. This works along the following lines: Let $v=\textrm{Re}\;u,\; w=\textrm{Im}\;u$, then in terms of $v$ and $w$ we have the following system:

              \begin{eqnarray*}
   \left.\begin{array}{ccc}
     -\omega^2 v - c^2\Delta v &=& 0 \quad\\
   \end{array}\right\} &\;& x\in\Omega,
 \\
   \left.\begin{array}{ccc}
     c (n\cdot\nabla v) - \omega w &=& 0 \quad\\
     c (n\cdot\nabla w) + \omega v &=& 0 \quad
   \end{array}\right\} &\;& x\in\Gamma_2,
 \\
   \left.\begin{array}{ccc}
     v &=& 1 \quad\\
     w &=& 0 \quad
   \end{array}\right\} &\;& x\in\Gamma_1.
 \end{eqnarray*}


              For test functions $\phi,\psi$ with $\phi|_{\Gamma_1}=\psi|_{\Gamma_1}=0$, after the usual multiplication, integration over $\Omega$ and applying integration by parts, we get the weak formulation

              \begin{eqnarray*}
 -\omega^2 \langle \phi, v \rangle_{\mathrm{L}^2(\Omega)}
 + c^2 \langle \nabla \phi, \nabla v \rangle_{\mathrm{L}^2(\Omega)}
 - c \omega \langle \phi, w \rangle_{\mathrm{L}^2(\Gamma_2)} &=& 0, \\
 -\omega^2 \langle \psi, w \rangle_{\mathrm{L}^2(\Omega)}
 + c^2 \langle \nabla \psi, \nabla w \rangle_{\mathrm{L}^2(\Omega)}
 + c \omega \langle \psi, v \rangle_{\mathrm{L}^2(\Gamma_2)} &=& 0.
 \end{eqnarray*}

We choose finite element spaces $V_h$ and $W_h$ with bases $\{\phi_j\}_{j=1}^n, \{\psi_j\}_{j=1}^n$ and look for approximate solutions

              \[
 v_h = \sum_{j=1}^n \alpha_j \phi_j, \;\; w_h = \sum_{j=1}^n \beta_j \psi_j.
\]


              (One should not be fooled by the right hand side being zero here, that is because we haven't included the Dirichlet boundary data yet.) Because of the alternating sign in the off-diagonal blocks, we can already see that this system is non-symmetric, in fact it is even indefinite. Of course, there is no necessity to choose the spaces $V_h$ and $W_h$ to be the same. However, we expect real and imaginary part of the solution to have similar properties and will therefore indeed take $V_h=W_h$ in the implementation, and also use the same basis functions $\phi_i = \psi_i$ for both spaces. The reason for the notation using different symbols is just that it allows us to distinguish between shape functions for $v$ and $w$, as this distinction plays an important role in the implementation.

              The test case

              For the computations, we will consider wave propagation in the unit square, with ultrasound generated by a transducer lens that is shaped like a segment of the circle with center at $(0.5, d)$ and a radius slightly greater than $d$; this shape should lead to a focusing of the sound wave at the center of the circle. Varying $d$ changes the "focus" of the lens and affects the spatial distribution of the intensity of $u$, where our main concern is how well $|u|=\sqrt{v^2+w^2}$ is focused.


              In the program below, we will implement the complex-valued Helmholtz equations using the formulation with split real and imaginary parts. We will also discuss how to generate a domain that looks like a square with a slight bulge simulating the transducer (in the UltrasoundProblem<dim>::make_grid() function), and how to generate graphical output that not only contains the solution components $v$ and $w$, but also the magnitude $\sqrt{v^2+w^2}$ directly in the output file (in UltrasoundProblem<dim>::output_results()). Finally, we use the ParameterHandler class to easily read parameters like the focal distance $d$, wave speed $c$, frequency $\omega$, and a number of other parameters from an input file at run-time, rather than fixing those parameters in the source code where we would have to re-compile every time we want to change parameters.
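The ParameterHandler pattern is always the same: declare entries with defaults and patterns, parse the input file, then query the values. A minimal sketch (the entry names, defaults, and file name are made up for illustration):

  ParameterHandler prm;
  prm.declare_entry("focal distance", "0.3", Patterns::Double(0),
                    "Focal distance d of the transducer lens");
  prm.declare_entry("wave speed", "1500", Patterns::Double(0),
                    "Speed of sound c in the medium");
  prm.parse_input("step-29.prm");

  const double d = prm.get_double("focal distance");
  const double c = prm.get_double("wave speed");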

              The commented program

              Include files

              The following header files have all been discussed before:


          The DirichletBoundaryValues class

          First we define a class for the function representing the Dirichlet boundary values. This has been done many times before and therefore does not need much explanation.


          Since there are two values $v$ and $w$ that need to be prescribed at the boundary, we have to tell the base class that this is a vector-valued function with two components, and the vector_value function and its cousin vector_value_list must return vectors with two entries. In our case the function is very simple, it just returns 1 for the real part $v$ and 0 for the imaginary part $w$ regardless of the point where it is evaluated.

            template <int dim>
            class DirichletBoundaryValues : public Function<dim>
            {
           
           

          The ComputeIntensity class


          As mentioned in the introduction, the quantity that we are really after is the spatial distribution of the intensity of the ultrasound wave, which corresponds to $|u|=\sqrt{v^2+w^2}$. Now we could just be content with having $v$ and $w$ in our output, and use a suitable visualization or postprocessing tool to derive $|u|$ from the solution we computed. However, there is also a way to output data derived from the solution in deal.II, and we are going to make use of this mechanism here.


          So far we have always used the DataOut::add_data_vector function to add vectors containing output data to a DataOut object. There is a special version of this function that in addition to the data vector has an additional argument of type DataPostprocessor. What happens when this function is used for output is that at each point where output data is to be generated, the DataPostprocessor::evaluate_scalar_field() or DataPostprocessor::evaluate_vector_field() function of the specified DataPostprocessor object is invoked to compute the output quantities from the values, the gradients and the second derivatives of the finite element function represented by the data vector (in the case of face related data, normal vectors are available as well). Hence, this allows us to output any quantity that can locally be derived from the values of the solution and its derivatives. Of course, the ultrasound intensity $|u|$ is such a quantity and its computation doesn't even involve any derivatives of $v$ or $w$.

In practice, the DataPostprocessor class only provides an interface to this functionality, and we need to derive our own class from it in order to implement the functions specified by the interface. In the most general case one has to implement several member functions, but if the output quantity is a single scalar then some of this boilerplate code can be handled by a more specialized class, DataPostprocessorScalar, and we can derive from that one instead. This is what the ComputeIntensity class does:

            template <int dim>
            class ComputeIntensity : public DataPostprocessorScalar<dim>

          In the constructor, we need to call the constructor of the base class with two arguments. The first denotes the name by which the single scalar quantity computed by this class should be represented in output files. In our case, the postprocessor has $|u|$ as output, so we use "Intensity".


          The second argument is a set of flags that indicate which data is needed by the postprocessor in order to compute the output quantities. This can be any subset of update_values, update_gradients and update_hessians (and, in the case of face data, also update_normal_vectors), which are documented in UpdateFlags. Of course, computation of the derivatives requires additional resources, so only the flags for data that are really needed should be given here, just as we do when we use FEValues objects. In our case, only the function values of $v$ and $w$ are needed to compute $|u|$, so we're good with the update_values flag.

            template <int dim>
            ComputeIntensity<dim>::ComputeIntensity()
            : DataPostprocessorScalar<dim>("Intensity", update_values)
            {}
           
           

          The actual postprocessing happens in the following function. Its input is an object that stores values of the function (which is here vector-valued) representing the data vector given to DataOut::add_data_vector, evaluated at all evaluation points where we generate output, and some tensor objects representing derivatives (that we don't use here since $|u|$ is computed from just $v$ and $w$). The derived quantities are returned in the computed_quantities vector. Remember that this function may only use data for which the respective update flag is specified by get_needed_update_flags. For example, we may not use the derivatives here, since our implementation of get_needed_update_flags requests that only function values are provided.

            template <int dim>
            void ComputeIntensity<dim>::evaluate_vector_field(
            const DataPostprocessorInputs::Vector<dim> &inputs,
            std::vector<Vector<double>> &computed_quantities) const
            {
            AssertDimension(computed_quantities.size(), inputs.solution_values.size());
           

          The computation itself is straightforward: We iterate over each entry in the output vector and compute $|u|$ from the corresponding values of $v$ and $w$. We do this by creating a complex number $u$ and then calling std::abs() on the result. (One may be tempted to call std::norm(), but in a historical quirk, the C++ committee decided that std::norm() should return the square of the absolute value – thereby not satisfying the properties mathematicians require of something called a "norm".)

            for (unsigned int p = 0; p < computed_quantities.size(); ++p)
            {
            AssertDimension(computed_quantities[p].size(), 1);
            computed_quantities[p](0) = std::abs(std::complex<double>(
            inputs.solution_values[p](0), inputs.solution_values[p](1)));
            }
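With the ComputeIntensity class complete, hooking it into the graphical output amounts to passing the postprocessor object alongside the solution vector. A minimal sketch of this use (variable names are assumptions, not the tutorial's exact code):

  ComputeIntensity<dim> intensities;
  DataOut<dim> data_out;
  data_out.attach_dof_handler(dof_handler);
  // The postprocessor is evaluated at each output point and adds the
  // derived scalar "Intensity" to the output alongside the raw data.
  data_out.add_data_vector(solution, intensities);
  data_out.build_patches();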

          The constructor takes the ParameterHandler object and stores it in a reference. It also initializes the DoF-Handler and the finite element system, which consists of two copies of the scalar $Q_1$ field, one for $v$ and one for $w$. In other words, we want the finite element space $Q_1\times Q_1 = Q_1^2$, which is easily constructed and passed as the constructor argument to the FESystem class (i.e., the type of the fe member being initialized here):

            template <int dim>
            UltrasoundProblem<dim>::UltrasoundProblem(ParameterHandler &param)
            : prm(param)
            dofs_per_cell = fe.n_dofs_per_cell();
           

          The FEValues objects will evaluate the shape functions for us. For the part of the bilinear form that involves integration on $\Omega$, we'll need the values and gradients of the shape functions, and of course the quadrature weights. For the terms involving the boundary integrals, only shape function values and the quadrature weights are necessary.

            FEValues<dim> fe_values(fe,
            quadrature_formula,
            {
            for (unsigned int j = 0; j < dofs_per_cell; ++j)
            {

          At this point, it is important to keep in mind that we are dealing with a finite element system with two components. Due to the way we constructed this FESystem, namely as the Cartesian product of two scalar finite element fields, each shape function has only a single nonzero component (they are, in deal.II lingo, primitive). Hence, each shape function can be viewed as one of the $\phi$'s or $\psi$'s from the introduction, and similarly the corresponding degrees of freedom can be attributed to either $\alpha$ or $\beta$. As we iterate through all the degrees of freedom on the current cell however, they do not come in any particular order, and so we cannot decide right away whether the DoFs with index $i$ and $j$ belong to the real or imaginary part of our solution. On the other hand, if you look at the form of the system matrix in the introduction, this distinction is crucial since it will determine to which block in the system matrix the contribution of the current pair of DoFs will go and hence which quantity we need to compute from the given two shape functions. Fortunately, the FESystem object can provide us with this information, namely it has a function FESystem::system_to_component_index(), that for each local DoF index returns a pair of integers of which the first indicates to which component of the system the DoF belongs. The second integer of the pair indicates which index the DoF has in the scalar base finite element field, but this information is not relevant here. If you want to know more about this function and the underlying scheme behind primitive vector valued elements, take a look at step-8 or the Handling vector valued problems topic, where these topics are explained in depth.

            if (fe.system_to_component_index(i).first ==
            fe.system_to_component_index(j).first)
            {

If both DoFs $i$ and $j$ belong to the same component, i.e. their shape functions are both $\phi$'s or both $\psi$'s, the contribution will end up in one of the diagonal blocks in our system matrix, and since the corresponding entries are computed by the same formula, we do not bother if they actually are $\phi$ or $\psi$ shape functions. We can simply compute the entry by iterating over all quadrature points and adding up their contributions, where values and gradients of the shape functions are supplied by our FEValues object.

            for (unsigned int q_point = 0; q_point < n_q_points;
            ++q_point)
            cell_matrix(i, j) +=
            fe.has_support_on_face(i, face_no) &&
            fe.has_support_on_face(j, face_no))

          The check whether shape functions have support on a face is not strictly necessary: if we don't check for it we would simply add up terms to the local cell matrix that happen to be zero because at least one of the shape functions happens to be zero. However, we can save that work by adding the checks above.


In either case, these DoFs will contribute to the boundary integrals in the off-diagonal blocks of the system matrix. To compute the integral, we loop over all the quadrature points on the face and sum up the contribution weighted with the quadrature weights that the face quadrature rule provides. In contrast to the entries on the diagonal blocks, here it does matter which one of the shape functions is a $\psi$ and which one is a $\phi$, since that will determine the sign of the entry. We account for this by a simple conditional statement that determines the correct sign. Since we already checked that DoF $i$ and $j$ belong to different components, it suffices here to test which component one of them belongs to.

            for (unsigned int q_point = 0; q_point < n_face_q_points;
            ++q_point)
            cell_matrix(i, j) +=

          UltrasoundProblem::output_results


          Here we output our solution $v$ and $w$ as well as the derived quantity $|u|$ in the format specified in the parameter file. Most of the work for deriving $|u|$ from $v$ and $w$ was already done in the implementation of the ComputeIntensity class, so that the output routine is rather straightforward and very similar to what is done in the previous tutorials.

            template <int dim>
            void UltrasoundProblem<dim>::output_results() const
            {
          @@ -804,7 +804,7 @@
           
            std::ofstream output(filename);
           
The solution vectors $v$ and $w$ are added to the DataOut object in the usual way:

            std::vector<std::string> solution_names;
            solution_names.emplace_back("Re_u");
            solution_names.emplace_back("Im_u");
          @@ -942,8 +942,8 @@
[Table in the original: plots of $|u|$ for different finite element orders]
The first two pictures show the real and imaginary parts of $u$, whereas the last shows the intensity $|u|$. One can clearly see that the intensity is focused around the focal point of the lens (0.5, 0.3), and that the focus is rather sharp in $x$-direction but more blurred in $y$-direction, which is a consequence of the geometry of the focusing lens, its finite aperture, and the wave nature of the problem.

Because colorful graphics are always fun, and to stress the focusing effects some more, here is another set of images highlighting how well the intensity is actually focused in $x$-direction:

/usr/share/doc/packages/dealii/doxygen/deal.II/step_3.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_3.html 2024-11-15 06:44:29.207669593 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_3.html 2024-11-15 06:44:29.207669593 +0000 @@ -158,7 +158,7 @@

\begin{align*}
  -\Delta u &= f \qquad\qquad & \text{in}\ \Omega,
  \\
  u &= 0 \qquad\qquad & \text{on}\ \partial\Omega.
\end{align*}

    We will solve this equation on the square, $\Omega=[-1,1]^2$, for which you've already learned how to generate a mesh in step-1 and step-2. In this program, we will also only consider the particular case $f(\mathbf x)=1$ and come back to how to implement the more general case in the next tutorial program, step-4.

If you've learned about the basics of the finite element method, you will remember the steps we need to take to approximate the solution $u$ by a finite dimensional approximation. Specifically, we first need to derive the weak form of the equation above, which we obtain by multiplying the equation by a test function $\varphi$ from the left (we will come back to the reason for multiplying from the left and not from the right below) and integrating over the domain $\Omega$:

    \begin{align*}
   -\int_\Omega \varphi \Delta u = \int_\Omega \varphi f.
\end{align*}
@@ -171,22 +171,22 @@

The test function $\varphi$ has to satisfy the same kind of boundary conditions (in mathematical terms: it needs to come from the tangent space of the set in which we seek the solution), so on the boundary $\varphi=0$ and consequently the weak form we are looking for reads

    \begin{align*}
   (\nabla\varphi, \nabla u)
    = (\varphi, f),
 \end{align*}

where we have used the common notation $(a,b)=\int_\Omega a\; b$. The problem then asks for a function $u$ for which this statement is true for all test functions $\varphi$ from the appropriate space (which here is the space $H^1$).

Of course we can't find such a function on a computer in the general case, and instead we seek an approximation $u_h(\mathbf x)=\sum_j U_j \varphi_j(\mathbf x)$, where the $U_j$ are unknown expansion coefficients we need to determine (the "degrees of freedom" of this problem), and $\varphi_i(\mathbf x)$ are the finite element shape functions we will use. To define these shape functions, we need the following:

    • A mesh on which to define shape functions. You have already seen how to generate and manipulate the objects that describe meshes in step-1 and step-2.
    • A finite element that describes the shape functions we want to use on the reference cell (which in deal.II is always the unit interval $[0,1]$, the unit square $[0,1]^2$ or the unit cube $[0,1]^3$, depending on which space dimension you work in). In step-2, we had already used an object of type FE_Q<2>, which denotes the usual Lagrange elements that define shape functions by interpolation on support points. The simplest one is FE_Q<2>(1), which uses polynomial degree 1. In 2d, these are often referred to as bilinear, since they are linear in each of the two coordinates of the reference cell. (In 1d, they would be linear and in 3d tri-linear; however, in the deal.II documentation, we will frequently not make this distinction and simply always call these functions "linear".)
    • A DoFHandler object that enumerates all the degrees of freedom on the mesh, taking the reference cell description the finite element object provides as the basis. You've also already seen how to do this in step-2.
    • A mapping that tells how the shape functions on the real cell are obtained from the shape functions defined by the finite element class on the reference cell. By default, unless you explicitly say otherwise, deal.II will use a (bi-, tri-)linear mapping for this, so in most cases you don't have to worry about this step.
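Pulling the four ingredients above together, a minimal sketch of the corresponding setup (following step-1/step-2 conventions; the refinement level is an assumption chosen only to match the $32\times 32$ mesh mentioned further down):

  #include <deal.II/grid/tria.h>
  #include <deal.II/grid/grid_generator.h>
  #include <deal.II/fe/fe_q.h>
  #include <deal.II/dofs/dof_handler.h>

  using namespace dealii;

  Triangulation<2> triangulation;
  GridGenerator::hyper_cube(triangulation, -1, 1); // the square [-1,1]^2
  triangulation.refine_global(5);                  // 2^5 = 32 cells per direction

  FE_Q<2>       fe(1);                     // bilinear Lagrange elements
  DoFHandler<2> dof_handler(triangulation);
  dof_handler.distribute_dofs(fe);         // enumerate the degrees of freedom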
Through these steps, we now have a set of functions $\varphi_i$, and we can define the weak form of the discrete problem: Find a function $u_h$, i.e., find the expansion coefficients $U_j$ mentioned above, so that

\begin{align*}
  (\nabla\varphi_i, \nabla u_h)
  = (\varphi_i, f),
\end{align*}
@@ -210,7 +210,7 @@
\begin{align*}
  A U = F,
\end{align*}

where the matrix $A$ and the right hand side $F$ are defined as

\begin{align*}
  A_{ij} &= (\nabla\varphi_i, \nabla \varphi_j),
  \\
  F_i &= (\varphi_i, f).
\end{align*}
@@ -232,8 +232,8 @@

    Assembling the matrix and right hand side vector

    Now we know what we need (namely: objects that hold the matrix and vectors, as well as ways to compute $A_{ij},F_i$), and we can look at what it takes to make that happen:

• The object for $A$ is of type SparseMatrix while those for $U$ and $F$ are of type Vector. We will see in the program below what classes are used to solve linear systems.
• We need a way to form the integrals. In the finite element method, this is most commonly done using quadrature, i.e. the integrals are replaced by a weighted sum over a set of quadrature points on each cell. That is, we first split the integral over $\Omega$ into integrals over all cells,

\begin{align*}
  A_{ij} &= (\nabla\varphi_i, \nabla \varphi_j)
  = \sum_{K \in {\mathbb T}} \int_K \nabla\varphi_i \cdot \nabla \varphi_j,
  \\
  F_i &= (\varphi_i, f)
  = \sum_{K \in {\mathbb T}} \int_K \varphi_i f,
\end{align*}
@@ -256,9 +256,9 @@
\begin{align*}
  A^K_{ij} &=
  \sum_q \nabla\varphi_i(\mathbf x^K_q) \cdot \nabla \varphi_j(\mathbf x^K_q) w^K_q,
  \\
  F^K_i &=
  \sum_q \varphi_i(\mathbf x^K_q) f(\mathbf x^K_q) w^K_q,
\end{align*}

  where $\mathbb{T} \approx \Omega$ is a Triangulation approximating the domain, $\mathbf x^K_q$ is the $q$th quadrature point on cell $K$, and $w^K_q$ the $q$th quadrature weight. There are different parts to what is needed in doing this, and we will discuss them in turn next.
    • First, we need a way to describe the location $\mathbf x_q^K$ of quadrature points and their weights $w^K_q$. They are usually mapped from the reference cell in the same way as shape functions, i.e., implicitly using the MappingQ1 class or, if you explicitly say so, through one of the other classes derived from Mapping. The locations and weights on the reference cell are described by objects derived from the Quadrature base class. Typically, one chooses a quadrature formula (i.e. a set of points and weights) so that the quadrature exactly equals the integral in the matrix; this can be achieved because all factors in the integral are polynomial, and is done by Gaussian quadrature formulas, implemented in the QGauss class.
• We then need something that can help us evaluate $\varphi_i(\mathbf x^K_q)$ on cell $K$. This is what the FEValues class does: it takes a finite element object to describe $\varphi$ on the reference cell, a quadrature object to describe the quadrature points and weights, and a mapping object (or implicitly takes the MappingQ1 class) and provides values and derivatives of the shape functions on the real cell $K$ as well as all sorts of other information needed for integration, at the quadrature points located on $K$. (A short sketch of these last two pieces follows below.)
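A minimal sketch of these last two pieces (following step-3's conventions; the update flags are an assumption about what the assembly below needs):

  #include <deal.II/base/quadrature_lib.h>
  #include <deal.II/fe/fe_values.h>

  // A Gauss rule of this order integrates the bilinear integrands exactly.
  QGauss<2>   quadrature_formula(fe.degree + 1);
  FEValues<2> fe_values(fe,
                        quadrature_formula,
                        update_values | update_gradients | update_JxW_values);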

    The process of computing the matrix and right hand side as a sum over all cells (and then a sum over quadrature points) is usually called assembling the linear system, or assembly for short, using the meaning of the word related to assembly line, meaning "the act of putting together a set of pieces, fragments, or elements".

    FEValues really is the central class in the assembly process. One way you can view it is as follows: The FiniteElement and derived classes describe shape functions, i.e., infinite dimensional objects: functions have values at every point. We need this for theoretical reasons because we want to perform our analysis with integrals over functions. However, for a computer, this is a very difficult concept, since they can in general only deal with a finite amount of information, and so we replace integrals by sums over quadrature points that we obtain by mapping (the Mapping object) using points defined on a reference cell (the Quadrature object) onto points on the real cell. In essence, we reduce the problem to one where we only need a finite amount of information, namely shape function values and derivatives, quadrature weights, normal vectors, etc, exclusively at a finite set of points. The FEValues class is the one that brings the three components together and provides this finite set of information on a particular cell $K$. You will see it in action when we assemble the linear system below.
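In code, the pattern the preceding paragraph describes looks roughly like this (a sketch, not the full assembly loop shown later):

  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      fe_values.reinit(cell); // compute shape values/gradients etc. on this K
      // ... use fe_values.shape_grad(...), fe_values.JxW(...), ...
    }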

    @@ -267,16 +267,16 @@
    Note
    The preceding overview of all the important steps of any finite element implementation has its counterpart in deal.II: The library can naturally be grouped into a number of "topics" that cover the basic concepts just outlined. You can access these topics through the "Topics" tab at the top of this page. An overview of the most fundamental groups of concepts is also available on the front page of the deal.II manual.

    Solving the linear system

    For a finite element program, the linear system we end up with here is relatively small: The matrix has size $1089 \times 1089$, owing to the fact that the mesh we use is $32\times 32$ and so there are $33^2=1089$ vertices in the mesh. In many of the later tutorial programs, matrix sizes in the range of tens of thousands to hundreds of thousands will not be uncommon, and with codes such as ASPECT that build on deal.II, we regularly solve problems with more than a hundred million equations (albeit using parallel computers). In any case, even for the small system here, the matrix is much larger than what one typically encounters in an undergraduate or most graduate courses, and so the question arises how we can solve such linear systems.

The first method one typically learns for solving linear systems is Gaussian elimination. The problem with this method is that it requires a number of operations that is proportional to $N^3$, where $N$ is the number of equations or unknowns in the linear system – more specifically, the number of operations is $\frac 23 N^3$, give or take a few. With $N=1089$, this means that we would have to do around $861$ million operations. This is a number that is quite feasible and it would take modern processors less than 0.1 seconds to do this. But it is clear that this isn't going to scale: If we have twenty times as many equations in the linear system (that is, twenty times as many unknowns), then it would already take 1000-10,000 seconds or on the order of an hour. Make the linear system another ten times larger, and it is clear that we can not solve it any more on a single computer.
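For the record, the operation count quoted above works out as follows:

\begin{align*}
  \frac 23 N^3 = \frac 23 \cdot 1089^3
  \approx \frac 23 \cdot 1.29\times 10^9
  \approx 8.61\times 10^8 .
\end{align*}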

One can rescue the situation somewhat by realizing that only a relatively small number of entries in the matrix are nonzero – that is, the matrix is sparse. Variations of Gaussian elimination can exploit this, making the process substantially faster; we will use one such method – implemented in the SparseDirectUMFPACK class – in step-29 for the first time, among several others that come after that. These variations of Gaussian elimination might get us to problem sizes on the order of 100,000 or 200,000, but not all that much beyond that.

Instead, what we will do here is take up an idea from 1952: the Conjugate Gradient method, or in short "CG". CG is an "iterative" solver in that it forms a sequence of vectors that converge to the exact solution; in fact, after $N$ such iterations in the absence of roundoff errors it finds the exact solution if the matrix is symmetric and positive definite. The method was originally developed as another way to solve a linear system exactly, like Gaussian elimination, but as such it had few advantages and was largely forgotten for a few decades. But, when computers became powerful enough to solve problems of a size where Gaussian elimination doesn't work well any more (sometime in the 1980s), CG was rediscovered as people realized that it is well suited for large and sparse systems like the ones we get from the finite element method. This is because (i) the vectors it computes converge to the exact solution, and consequently we do not actually have to do all $N$ iterations to find the exact solution as long as we're happy with reasonably good approximations; and (ii) it only ever requires matrix-vector products, which is very useful for sparse matrices because a sparse matrix has, by definition, only ${\cal O}(N)$ entries and so a matrix-vector product can be done with ${\cal O}(N)$ effort whereas it costs $N^2$ operations to do the same for dense matrices. As a consequence, we can hope to solve linear systems with at most ${\cal O}(N^2)$ operations, and in many cases substantially fewer.

    Finite element codes therefore almost always use iterative solvers such as CG for the solution of the linear systems, and we will do so in this code as well. (We note that the CG method is only usable for matrices that are symmetric and positive definite; for other equations, the matrix may not have these properties and we will have to use other variations of iterative solvers such as BiCGStab or GMRES that are applicable to more general matrices.)

An important component of these iterative solvers is that we specify the tolerance with which we want to solve the linear system – in essence, a statement about the error we are willing to accept in our approximate solution. The error in an approximate solution $\tilde x$ of a linear system $Ax=b$ with exact solution $x$ is defined as $\|x-\tilde x\|$, but this is a quantity we cannot compute because we don't know the exact solution $x$. Instead, we typically consider the residual, defined as $\|b-A\tilde x\|=\|A(x-\tilde x)\|$, as a computable measure. We then let the iterative solver compute more and more accurate solutions $\tilde x$, until $\|b-A\tilde x\|\le \tau$. A practical question is what value $\tau$ should have. In most applications, setting

    \begin{align*}
   \tau = 10^{-6} \|b\|
 \end{align*}

is a reasonable choice. The fact that we make $\tau$ proportional to the size (norm) of $b$ makes sure that our expectations of the accuracy in the solution are relative to the size of the solution. This makes sense: If we make the right hand side $b$ ten times larger, then the solution $x$ of $Ax=b$ will also be ten times larger, and so will $\tilde x$; we want the same number of accurate digits in $\tilde x$ as before, which means that we should also terminate when the residual $\|b-A\tilde x\|$ is ten times the original size – which is exactly what we get if we make $\tau$ proportional to $\|b\|$.

    All of this will be implemented in the Step3::solve() function in this program. As you will see, it is quite simple to set up linear solvers with deal.II: The whole function will have only three lines.
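A minimal sketch of what those three lines amount to (assuming system_matrix, solution, and system_rhs from the surrounding program; the tolerance matches the $\tau = 10^{-6}\|b\|$ choice above):

  #include <deal.II/lac/solver_control.h>
  #include <deal.II/lac/solver_cg.h>
  #include <deal.II/lac/precondition.h>

  SolverControl            solver_control(1000, 1e-6 * system_rhs.l2_norm());
  SolverCG<Vector<double>> solver(solver_control);
  solver.solve(system_matrix, solution, system_rhs, PreconditionIdentity());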

    About the implementation

    Although this is the simplest possible equation you can solve using the finite element method, this program shows the basic structure of most finite element programs and also serves as the template that almost all of the following programs will essentially follow. Specifically, the main class of this program looks like this:

    class Step3
    @@ -505,7 +505,7 @@

    Now it is time to start integration over the cell, which we do by looping over all quadrature points, which we will number by q_index.

      for (const unsigned int q_index : fe_values.quadrature_point_indices())
      {
First assemble the matrix: For the Laplace problem, the matrix on each cell is the integral over the gradients of shape functions i and j. Since we do not integrate exactly, but rather use quadrature, this is the sum over all quadrature points of the integrands times the determinant of the Jacobian matrix at the quadrature point times the weight of this quadrature point. You can get the gradient of shape function $i$ at the quadrature point with number q_index by using fe_values.shape_grad(i,q_index); this gradient is a 2-dimensional vector (in fact it is of type Tensor<1,dim>, here with dim=2), and the product of two such vectors is the scalar product, i.e. the product of the two shape_grad function calls is the dot product. This is in turn multiplied by the Jacobian determinant and the quadrature point weight (which one gets combined in the call to FEValues::JxW()). Finally, this is repeated for all shape functions $i$ and $j$:

  for (const unsigned int i : fe_values.dof_indices())
    for (const unsigned int j : fe_values.dof_indices())
      cell_matrix(i, j) +=
        (fe_values.shape_grad(i, q_index) * // grad phi_i(x_q)
         fe_values.shape_grad(j, q_index) * // grad phi_j(x_q)
         fe_values.JxW(q_index));           // dx
    @@ -631,7 +631,7 @@
[Figure: visualization of the solution of step-3]
It shows both the solution and the mesh, elevated above the $x$- $y$ plane based on the value of the solution at each point. Of course the solution here is not particularly exciting, but that is a result of both what the Laplace equation represents and the right hand side $f(\mathbf x)=1$ we have chosen for this program: The Laplace equation describes (among many other uses) the vertical deformation of a membrane subject to an external (also vertical) force. In the current example, the membrane's borders are clamped to a square frame with no vertical variation; a constant force density will therefore intuitively lead to a membrane that simply bulges upward – like the one shown above.

    VisIt and Paraview both allow playing with various kinds of visualizations of the solution. Several video lectures show how to use these programs. See also video lecture 11, video lecture 32.

    Possibilities for extensions

    If you want to play around a little bit with this program, here are a few suggestions:

    @@ -934,7 +934,7 @@
    solution = np.array(file["/solution"])
    x, y = nodes.T
The following stores the $x$ and $y$ coordinates of each node of each cell in one flat array.

    cell_x = x[cells.flatten()]
    cell_y = y[cells.flatten()]

The following tags the cell ids. Each four entries correspond to one cell. Then we collect the coordinates and ids into a data frame:

    n_cells = cells.shape[0]
    cell_ids = np.repeat(np.arange(n_cells), 4)
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html 2024-11-15 06:44:29.267670129 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_30.html 2024-11-15 06:44:29.267670129 +0000 @@ -233,7 +233,7 @@

    Implementation

    Most of the time, when we do finite element computations, we only consider one cell at a time, for example to calculate cell contributions to the global matrix, or to interpolate boundary values. However, sometimes we have to look at how cells are related in our algorithms. Relationships between cells come in two forms: neighborship and mother-child relationship. For the case of isotropic refinement, deal.II uses certain conventions (invariants) for cell relationships that are always maintained. For example, a refined cell always has exactly $2^{dim}$ children. And (except for the 1d case), two neighboring cells may differ by at most one refinement level: they are equally often refined or one of them is exactly once more refined, leaving exactly one hanging node on the common face. Almost all of the time these invariants are only of concern in the internal implementation of the library. However, there are cases where knowledge of them is also relevant to an application program.

In the current context, it is worth noting that the kind of mesh refinement affects some of the most fundamental assumptions. Consequently, some of the usual code found in application programs will need modifications to exploit the features of meshes which were created using anisotropic refinement. For those interested in how deal.II evolved, it may be of interest that the loosening of such invariants required some incompatible changes. For example, the library used to have a member GeometryInfo<dim>::children_per_cell that specified how many children a cell has once it is refined. For isotropic refinement, this number is equal to $2^{dim}$, as mentioned above. However, for anisotropic refinement, this number does not exist, as it can be either two or four in 2D and two, four or eight in 3D, and the member GeometryInfo<dim>::children_per_cell has consequently been removed. It has now been replaced by GeometryInfo<dim>::max_children_per_cell which specifies the maximum number of children a cell can have. How many children a refined cell has was previously available as static information, but now it depends on the actual refinement state of a cell and can be retrieved using TriaAccessor::n_children(), a call that works equally well for both isotropic and anisotropic refinement. A very similar situation can be found for faces and their subfaces: the pertinent information can be queried using GeometryInfo<dim>::max_children_per_face or face->n_children(), depending on the context.
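A small sketch of the queries just mentioned, which work identically for isotropic and anisotropic refinement (process_cell() is a hypothetical placeholder for whatever the application does):

  for (const auto &cell : triangulation.cell_iterators())
    if (cell->has_children())
      for (unsigned int c = 0; c < cell->n_children(); ++c)
        process_cell(cell->child(c)); // hypothetical helper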

Another important aspect, and the most important one in this tutorial, is the treatment of neighbor-relations when assembling jump terms on the faces between cells. Looking at the documentation of the assemble_system functions in step-12 we notice that we need to decide if a neighboring cell is coarser, finer or on the same (refinement) level as our current cell. These decisions do not work in the same way for anisotropic refinement as the information given by the level of a cell is not enough to completely characterize anisotropic cells; for example, are the terminal children of a two-dimensional cell that is first cut in $x$-direction and whose children are then cut in $y$-direction on level 2, or are they on level 1 as they would be if the cell would have been refined once isotropically, resulting in the same set of finest cells?

    After anisotropic refinement, a coarser neighbor is not necessarily exactly one level below ours, but can pretty much have any level relative to the current one; in fact, it can even be on a higher level even though it is coarser. Thus the decisions have to be made on a different basis, whereas the intention of the decisions stays the same.

    In the following, we will discuss the cases that can happen when we want to compute contributions to the matrix (or right hand side) of the form

    \[
@@ -244,7 +244,7 @@
• Finer neighbor: If we are on an active cell and want to integrate over a face $f\subset \partial K$, the first possibility is that the neighbor behind this face is more refined, i.e. has children occupying only part of the common face. In this case, the face under consideration has to be a refined one, which we can determine by asking if (face->has_children()). If this is true, we need to loop over all subfaces and get the neighbor's child behind this subface, so that we can reinit an FEFaceValues object with the neighbor and an FESubfaceValues object with our cell and the respective subface.

  For isotropic refinement, this kind is reasonably simple because we know that an invariant of the isotropically refined adaptive meshes in deal.II is that neighbors can only differ by exactly one refinement level. However, this isn't quite true any more for anisotropically refined meshes, in particular in 3d; there, the active cell we are interested in on the other side of $f$ might not actually be a child of our neighbor, but perhaps a grandchild or even a farther offspring. Fortunately, this complexity is hidden in the internals of the library. All we need to do is call the CellAccessor::neighbor_child_on_subface() function. Still, in 3D there are two cases which need special consideration:

  • If the neighbor is refined more than once anisotropically, it might be that here are not two or four but actually three subfaces to consider. Imagine the following refinement process of the (two-dimensional) face of the (three-dimensional) neighbor cell we are considering: first the face is refined along x, later on only the left subface is refined along y.

      • If the neighbor is refined more than once anisotropically, it might be that here are not two or four but actually three subfaces to consider. Imagine the following refinement process of the (two-dimensional) face of the (three-dimensional) neighbor cell we are considering: first the face is refined along x, later on only the left subface is refined along y.

        [ASCII art in the original: the face is first refined along x, then only the left subface is refined along y, leaving three subfaces]
        @@ -266,7 +266,7 @@
        [ASCII art in the original: a cell marked with # next to two finer cells marked with +]
Here, the left two cells resulted from an anisotropic bisection of the mother cell in $y$-direction, whereas the right four cells resulted from a simultaneous anisotropic refinement in both the $y$- and $z$-directions. The left cell marked with # has two finer neighbors marked with +, but the actual neighbor of the left cell is the complete right mother cell, as the two cells marked with + are finer and their direct mother is the one large cell.

However, fortunately, CellAccessor::neighbor_child_on_subface() takes care of these situations by itself, if you loop over the correct number of subfaces; in the above example this is two. The FESubfaceValues<dim>::reinit function takes care of this too, so that the resulting state is always correct. There is one little caveat, however: for reiniting the neighbor's FEFaceValues object you need to know the index of the face that points toward the current cell. Usually you assume that the neighbor you get directly is as coarse or as fine as you if it has children, so this information can be obtained with CellAccessor::neighbor_of_neighbor(). If the neighbor is coarser, however, you would have to use the first value in CellAccessor::neighbor_of_coarser_neighbor() instead. In order to make this easy for you, there is CellAccessor::neighbor_face_no(), which does the correct thing for you and returns the desired result.
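A sketch of the resulting face loop (following step-30's logic; the reinit calls are abbreviated to comments, and cell is assumed to be an active cell iterator):

  for (const unsigned int face_no : cell->face_indices())
    {
      const auto face = cell->face(face_no);
      if (face->has_children()) // the neighbor is finer
        for (unsigned int subface = 0; subface < face->n_children(); ++subface)
          {
            const auto neighbor_child =
              cell->neighbor_child_on_subface(face_no, subface);
            const unsigned int neighbor_face_no =
              cell->neighbor_face_no(face_no);
            // reinit an FESubfaceValues object with (cell, face_no, subface)
            // and an FEFaceValues object with (neighbor_child,
            // neighbor_face_no), then assemble the jump terms ...
          }
    }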

    @@ -307,12 +307,12 @@

This approach is similar to the one we have used in step-27 for hp-refinement and has the great advantage of flexibility: Any error indicator can be used in the anisotropic process, i.e. if you have quite involved a posteriori goal-oriented error indicators available, you can use them as easily as a simple Kelly error estimator. The anisotropic part of the refinement process is not influenced by this choice. Furthermore, simply leaving out the third and fourth steps leads to the same isotropic refinement you used to get before any anisotropic changes in deal.II or your application program. As a last advantage, working only on cells flagged for refinement results in a faster evaluation of the anisotropic indicator, which can become noticeable on finer meshes with a lot of cells if the indicator is quite involved.

Here, we use a very simple approach which is only applicable to DG methods. The general idea is quite simple: DG methods allow the discrete solution to jump over the faces of a cell, whereas it is smooth within each cell. Of course, in the limit we expect that the jumps tend to zero as we refine the mesh and approximate the true solution better and better. Thus, a large jump across a given face indicates that the cell should be refined (at least) orthogonally to that face, whereas a small jump does not lead to this conclusion. It is possible, of course, that the exact solution is not smooth and that it also features a jump. In that case, however, a large jump over one face indicates that this face is more or less parallel to the jump and in the vicinity of it, thus again we would expect a refinement orthogonal to the face under consideration to be effective.

The proposed indicator calculates the average jump $K_j$, i.e. the mean value of the absolute jump $|[u]|$ of the discrete solution $u$ over the two faces $f_i^j$, $i=1,2$, $j=1..d$ orthogonal to coordinate direction $j$ on the unit cell.

    \[
 K_j = \frac{\sum_{i=1}^2 \int_{f_i^j}|[u]| dx}{\sum_{i=1}^2 |f_i^j|} .
 \]

If the average jump in one direction is larger than the average of the jumps in the other directions by a certain factor $\kappa$, i.e. if $K_i > \kappa \frac 1{d-1} \sum_{j=1, j\neq i}^d K_j$, the cell is refined only along that particular direction $i$, otherwise the cell is refined isotropically.

    Such a criterion is easily generalized to systems of equations: the absolute value of the jump would be replaced by an appropriate norm of the vector-valued jump.
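A sketch of the decision rule just described (jump_in_direction[] is a hypothetical per-cell array holding the $K_j$, and kappa is the threshold factor $\kappa$; only the anisotropic flag call, cell->set_refine_flag() with a RefinementCase, is the actual deal.II API):

  for (unsigned int i = 0; i < dim; ++i)
    {
      double average_of_others = 0;
      for (unsigned int j = 0; j < dim; ++j)
        if (j != i)
          average_of_others += jump_in_direction[j] / (dim - 1);

      if (jump_in_direction[i] > kappa * average_of_others)
        cell->set_refine_flag(RefinementCase<dim>::cut_axis(i)); // anisotropic
      // otherwise the default (isotropic) refinement flag is kept
    }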

    The problem

    We solve the linear transport equation presented in step-12. The domain is extended to cover $[-1,1]\times[0,1]$ in 2D, where the flow field $\beta$ describes a counterclockwise quarter circle around the origin in the right half of the domain and is parallel to the x-axis in the left part of the domain. The inflow boundary is again located at $x=1$ and along the positive part of the x-axis, and the boundary conditions are chosen as in step-12.

    @@ -394,7 +394,7 @@
The flow field is chosen to be a quarter circle with counterclockwise flow direction and with the origin as midpoint for the right half of the domain with positive $x$ values, whereas the flow simply goes to the left in the left part of the domain at a velocity that matches the one coming in from the right. In the circular part the magnitude of the flow velocity is proportional to the distance from the origin. This is a difference to step-12, where the magnitude was 1 everywhere. The new definition leads to a linear variation of $\beta$ along each given face of a cell. On the other hand, the solution $u(x,y)$ is exactly the same as before.

      void value_list(const std::vector<Point<dim>> &points,
      std::vector<Point<dim>> &values) const
      {
    @@ -1355,7 +1355,7 @@

We see that the solution on the anisotropically refined mesh is very similar to the solution obtained on the isotropically refined mesh. Thus the anisotropic indicator seems to effectively select the appropriate cells for anisotropic refinement.

The pictures also explain why the mesh is refined as it is. In the whole left part of the domain refinement is only performed along the $y$-axis of cells. In the right part of the domain the refinement is dominated by isotropic refinement, as the anisotropic feature of the solution - the jump from one to zero - is not well aligned with the mesh where the advection direction takes a turn. However, at the bottom and closest (to the observer) parts of the quarter circle this jump again becomes more and more aligned with the mesh and the refinement algorithm reacts by creating anisotropic cells of increasing aspect ratio.

It might seem that the necessary alignment of anisotropic features and the coarse mesh can decrease performance significantly for real world problems. That is not wrong in general: If one were, for example, to apply anisotropic refinement to problems in which shocks appear (e.g., the equations solved in step-69), then in many cases the shock is not aligned with the mesh and anisotropic refinement will help little unless one also introduces techniques to move the mesh in alignment with the shocks. On the other hand, many steep features of solutions are due to boundary layers. In those cases, the mesh is already aligned with the anisotropic features because it is of course aligned with the boundary itself, and anisotropic refinement will almost always increase the efficiency of computations on adapted grids for these cases.

    The plain program

    /* ------------------------------------------------------------------------
/usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html 2024-11-15 06:44:29.375671094 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_31.html 2024-11-15 06:44:29.375671094 +0000 @@ -200,11 +200,11 @@

\begin{eqnarray*}
  -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=& -\rho\; \beta \; T\; \mathbf{g},
  \\
  \nabla \cdot {\mathbf u} &=& 0,
  \\
  \frac{\partial T}{\partial t} + {\mathbf u} \cdot \nabla T - \nabla \cdot \kappa \nabla T &=& \gamma.
\end{eqnarray*}

These equations fall into the class of vector-valued problems (a toplevel overview of this topic can be found in the Handling vector valued problems topic). Here, $\mathbf u$ is the velocity field, $p$ the pressure, and $T$ the temperature of the fluid. $\varepsilon ({\mathbf u}) = \frac 12 [(\nabla{\mathbf u}) + (\nabla {\mathbf u})^T]$ is the symmetric gradient of the velocity. As can be seen, velocity and pressure solve a Stokes equation describing the motion of an incompressible fluid, an equation we have previously considered in step-22; we will draw extensively on the experience we have gained in that program, in particular with regard to efficient linear Stokes solvers.

    The forcing term of the fluid motion is the buoyancy of the fluid, expressed as the product of the density $\rho$, the thermal expansion coefficient $\beta$, the temperature $T$ and the gravity vector $\mathbf{g}$ pointing downward. (A derivation of why the right hand side looks like it looks is given in the introduction of step-32.) While the first two equations describe how the fluid reacts to temperature differences by moving around, the third equation states how the fluid motion affects the temperature field: it is an advection diffusion equation, i.e., the temperature is attached to the fluid particles and advected along in the flow field, with an additional diffusion (heat conduction) term. In many applications, the diffusion coefficient is fairly small, and the temperature equation is in fact transport, not diffusion dominated and therefore in character more hyperbolic than elliptic; we will have to take this into account when developing a stable discretization.

In the equations above, the term $\gamma$ on the right hand side denotes the heat sources and may be a spatially and temporally varying function. $\eta$ and $\kappa$ denote the viscosity and diffusivity coefficients, which we assume constant for this tutorial program. The more general case when $\eta$ depends on the temperature is an important factor in physical applications: Most materials become more fluid as they get hotter (i.e., $\eta$ decreases with $T$); sometimes, as in the case of rock minerals at temperatures close to their melting point, $\eta$ may change by orders of magnitude over the typical range of temperatures.

We note that the Stokes equation above could be nondimensionalized by introducing the Rayleigh number $\mathrm{Ra}=\frac{\|\mathbf{g}\| \beta \rho}{\eta \kappa} \delta T L^3$ using a typical length scale $L$, typical temperature difference $\delta T$, density $\rho$, viscosity $\eta$, and thermal diffusivity $\kappa$. $\mathrm{Ra}$ is a dimensionless number that describes the ratio of heat transport due to convection induced by buoyancy changes from temperature differences, and of heat transport due to thermal diffusion. A small Rayleigh number implies that buoyancy is not strong relative to viscosity and fluid motion $\mathbf{u}$ is slow enough so that heat diffusion $\kappa\nabla T$ is the dominant heat transport term. On the other hand, a fluid with a high Rayleigh number will show vigorous convection that dominates heat conduction.

    For most fluids for which we are interested in computing thermal convection, the Rayleigh number is very large, often $10^6$ or larger. From the structure of the equations, we see that this will lead to large pressure differences and large velocities. Consequently, the convection term in the convection-diffusion equation for $T$ will also be very large and an accurate solution of this equation will require us to choose small time steps. Problems with large Rayleigh numbers are therefore hard to solve numerically for similar reasons that make solving the Navier-Stokes equations hard to solve when the Reynolds number $\mathrm{Re}$ is large.

    Note that a large Rayleigh number does not necessarily involve large velocities in absolute terms. For example, the Rayleigh number in the earth mantle is larger than $10^6$. Yet the velocities are small: the material is in fact solid rock but it is so hot and under pressure that it can flow very slowly, on the order of at most a few centimeters per year. Nevertheless, this can lead to mixing over time scales of many million years, a time scale much shorter than for the same amount of heat to be distributed by thermal conductivity and a time scale of relevance to affect the evolution of the earth's interior and surface structure.

    Note
    If you are interested in using the program as the basis for your own experiments, you will also want to take a look at its continuation in step-32. Furthermore, step-32 later was developed into the much larger open source code ASPECT (see https://aspect.geodynamics.org/ ) that can solve realistic problems and that you may want to investigate before trying to morph step-31 into something that can solve whatever you want to solve.
    @@ -215,7 +215,7 @@

Similarly, the velocity field requires us to pose boundary conditions. These may be no-slip no-flux conditions $\mathbf{u}=0$ on $\partial\Omega$ if the fluid sticks to the boundary, or no normal flux conditions $\mathbf n \cdot \mathbf u = 0$ if the fluid can flow along but not across the boundary, or any number of other conditions that are physically reasonable. In this program, we will use no normal flux conditions.

    Solution approach

Like the equations solved in step-21, we here have a system of differential-algebraic equations (DAE): with respect to the time variable, only the temperature equation is a differential equation whereas the Stokes system for $\mathbf{u}$ and $p$ has no time-derivatives and is therefore of the sort of an algebraic constraint that has to hold at each time instant. The main difference to step-21 is that the algebraic constraint there was a mixed Laplace system of the form

\begin{eqnarray*}
  \mathbf u + {\mathbf K}\lambda \nabla p &=& 0, \\
  \nabla\cdot \mathbf u &=& f,
\end{eqnarray*}
@@ -238,7 +238,7 @@
\begin{eqnarray*}
  -\nabla \cdot (2\eta \varepsilon ({\mathbf u}^{n-1})) + \nabla p^{n-1} &=& -\rho\; \beta \; T^{n-1} \mathbf{g},
  \\
  \nabla \cdot {\mathbf u}^{n-1} &=& 0,
\end{eqnarray*}

and then the temperature equation with an extrapolated velocity field to time $n$.

    In contrast to step-21, we'll use a higher order time stepping scheme here, namely the Backward Differentiation Formula scheme of order 2 (BDF-2 in short) that replaces the time derivative $\frac{\partial T}{\partial t}$ by the (one-sided) difference quotient $\frac{\frac 32 T^{n}-2T^{n-1}+\frac 12 T^{n-2}}{k}$ with $k$ the time step size. This gives the discretized-in-time temperature equation

\begin{eqnarray*}
  \frac 32 T^n
  -
  2T^{n-1}
  +
  \frac 12 T^{n-2}
  +
  k(2{\mathbf u}^{n-1}-{\mathbf u}^{n-2}) \cdot \nabla (2T^{n-1}-T^{n-2})
  -
  k\nabla \cdot \kappa \nabla T^n
  &=&
  k\gamma.
\end{eqnarray*}
@@ -254,7 +254,7 @@

Note how the temperature equation is solved semi-explicitly: diffusion is treated implicitly whereas advection is treated explicitly using an extrapolation (or forward-projection) of temperature and velocity, including the just-computed velocity ${\mathbf u}^{n-1}$. The forward-projection to the current time level $n$ is derived from a Taylor expansion, $T^n \approx T^{n-1} + k_n \frac{\partial T}{\partial t} \approx T^{n-1} + k_n \frac{T^{n-1}-T^{n-2}}{k_n} = 2T^{n-1}-T^{n-2}$. We need this projection for maintaining the order of accuracy of the BDF-2 scheme. In other words, the temperature fields we use in the explicit right hand side are second order approximations of the current temperature field — not quite an explicit time stepping scheme, but by character not too far away either.

    The introduction of the temperature extrapolation limits the time step by a Courant-Friedrichs-Lewy (CFL) condition just like it was in step-21. (We wouldn't have had that stability condition if we treated the advection term implicitly, since the BDF-2 scheme is A-stable; the price would have been that we needed to build a new temperature matrix at each time step.) We will discuss the exact choice of time step in the results section, but for the moment what is important is that this CFL condition means that the time step size $k$ may change from time step to time step, and that we have to modify the above formula slightly. If $k_n,k_{n-1}$ are the time step sizes of the current and previous time step, then we use the approximations

    \begin{eqnarray*}
   \frac{\partial T}{\partial t} \approx
   \frac 1{k_n}
   \left(
     \frac{2k_n+k_{n-1}}{k_n+k_{n-1}} T^{n}
     -
     \frac{k_n+k_{n-1}}{k_{n-1}} T^{n-1}
     +
     \frac{k_n^2}{k_{n-1}(k_n+k_{n-1})} T^{n-2}
   \right)
 \end{eqnarray*}

    for the time derivative, and evaluate the explicitly treated advection terms at the extrapolated values ${(\cdot)}^{*,n}$,

    where ${(\cdot)}^{*,n} = \left(1+\frac{k_n}{k_{n-1}}\right)(\cdot)^{n-1} - \frac{k_n}{k_{n-1}}(\cdot)^{n-2}$ denotes the extrapolation of velocity $\mathbf u$ and temperature $T$ to time level $n$, using the values at the two previous time steps. That's not an easy to read equation, but will provide us with the desired higher order accuracy. As a consistency check, it is easy to verify that it reduces to the same equation as above if $k_n=k_{n-1}$.

    As a final remark we note that the choice of a higher order time stepping scheme of course forces us to keep more time steps in memory; in particular, we here will need to have $T^{n-2}$ around, a vector that we could previously discard. This seems like a nuisance that we were able to avoid previously by using only a first order time stepping scheme, but as we will see below when discussing the topic of stabilization, we will need this vector anyway and so keeping it around for time discretization is essentially for free and gives us the opportunity to use a higher order scheme.

    Weak form and space discretization for the Stokes part

    Like solving the mixed Laplace equations, solving the Stokes equations requires us to choose particular pairs of finite elements for velocities and pressure variables. Because this has already been discussed in step-22, we only cover this topic briefly: Here, we use the stable pair $Q_{p+1}^d \times Q_p, p\ge 1$. These are continuous elements, so we can form the weak form of the Stokes equation without problem by integrating by parts and substituting continuous functions by their discrete counterparts:

    \begin{eqnarray*}
   (\varepsilon({\mathbf v}_h), 2\eta \varepsilon ({\mathbf u}_h))
   -
   (\nabla \cdot {\mathbf v}_h, p_h)
   &=&
   -({\mathbf v}_h, \rho\; \beta\; T\; \mathbf{g}),
   \\
   (q_h, \nabla \cdot {\mathbf u}_h) &=& 0
 \end{eqnarray*}

    for all test functions $\mathbf v_h, q_h$.

    Stabilization, artificial diffusion

    The more difficult question is what to do about the advection-dominated temperature equation. Following Guermond and Popov, we stabilize it by adding a nonlinear artificial diffusion $\nu_\alpha(T)$ that is based on the residual of the equation,

    \begin{eqnarray*}
   R_\alpha(T)
   =
   \left(\frac{\partial T}{\partial t} +
   {\mathbf u} \cdot \nabla T - \nabla \cdot \kappa \nabla T - \gamma\right)
   T^{\alpha-1}
 \end{eqnarray*}

    where we will later choose the stabilization exponent $\alpha$ from within the range $[1,2]$. Note that $R_\alpha(T)$ will be zero if $T$ satisfies the temperature equation, since then the term in parentheses will be zero. Multiplying terms out, we get the following, entirely equivalent form:

    \begin{eqnarray*}
   R_\alpha(T)
   =
   \frac 1\alpha
   \frac{\partial T^\alpha}{\partial t}
   +
   \frac 1\alpha
   {\mathbf u} \cdot \nabla T^\alpha
   -
   \frac 1\alpha
   \nabla \cdot \kappa \nabla T^\alpha
   +
   \kappa(\alpha-1)
   T^{\alpha-2} |\nabla T|^2
   -
   \gamma
   T^{\alpha-1}.
 \end{eqnarray*}

    Based on this residual, the artificial viscosity is defined cellwise as

    \begin{eqnarray*}
   \nu_\alpha(T)|_K
   =
   \beta
   \|\mathbf{u}\|_{L^\infty(K)}
   \min\left\{
     h_K,
     h_K^\alpha\,
     \frac{\|R_\alpha(T)\|_{L^\infty(K)}}{c(\mathbf{u},T)}
   \right\},
 \end{eqnarray*}

    where $\beta$ is a dimensionless stabilization constant, $h_K$ is the cell diameter, $c(\mathbf{u},T) =
  c_R\ \|\mathbf{u}\|_{L^\infty(\Omega)} \ \mathrm{var}(T)
  \ |\mathrm{diam}(\Omega)|^{\alpha-2}$, $\mathrm{var}(T)=\max_\Omega T - \min_\Omega T$ is the range of present temperature values (remember that buoyancy is driven by temperature variations, not the absolute temperature), and $c_R$ is a dimensionless constant. To understand why this method works consider this: If on a particular cell $K$ the temperature field is smooth, then we expect the residual to be small there (in fact to be on the order of ${\cal O}(h_K)$) and the stabilization term that injects artificial diffusion will there be of size $h_K^{\alpha+1}$ — i.e., rather small, just as we hope it to be when no additional diffusion is necessary. On the other hand, if we are on or close to a discontinuity of the temperature field, then the residual will be large; the minimum operation in the definition of $\nu_\alpha(T)$ will then ensure that the stabilization has size $h_K$ — the optimal amount of artificial viscosity to ensure stability of the scheme.

    Whether or not this scheme really works is a good question. Computations by Guermond and Popov have shown that this form of stabilization actually performs much better than most of the other stabilization schemes that are around (for example streamline diffusion, to name only the simplest one). Furthermore, for $\alpha\in [1,2)$ they can even prove that it produces better convergence orders for the linear transport equation than for example streamline diffusion. For $\alpha=2$, no theoretical results are currently available, but numerical tests indicate that the results are considerably better than for $\alpha=1$.

    A more practical question is how to introduce this artificial diffusion into the equations we would like to solve. Note that the numerical viscosity $\nu(T)$ is temperature-dependent, so the equation we want to solve is nonlinear in $T$ — not what one desires from a simple method to stabilize an equation, and even less so if we realize that $\nu(T)$ is nondifferentiable in $T$. However, there is no reason to despair: we still have to discretize in time and we can treat the term explicitly.

    In the definition of the stabilization parameter, we approximate the time derivative by $\frac{\partial T}{\partial t} \approx \frac{T^{n-1}-T^{n-2}}{k^{n-1}}$. This approximation uses only data that is already available, and this is the reason why we need to store data of two previous time steps (which enabled us to use the BDF-2 scheme without additional storage cost). We could now simply evaluate the rest of the terms at $t_{n-1}$, but then the discrete residual would be nothing else than a backward Euler approximation, which is only first order accurate. So, in case of smooth solutions, the residual would still be of order $h$, despite the second order time accuracy in the outer BDF-2 scheme and the spatial FE discretization. This is certainly not what we want to have (in fact, we desired to have small residuals in regions where the solution behaves nicely), so a bit more care is needed. The key to this problem is to observe that the first derivative as we constructed it is actually centered at $t_{n-\frac{3}{2}}$. We get the desired second order accurate residual calculation if we evaluate all spatial terms at $t_{n-\frac{3}{2}}$ by using the approximation $\frac 12 T^{n-1}+\frac 12 T^{n-2}$, which means that we calculate the nonlinear viscosity as a function of this intermediate temperature, $\nu_\alpha = \nu_\alpha\left(\frac 12 T^{n-1}+\frac 12 T^{n-2}\right)$. Note that this evaluation of the residual is nothing else than a Crank-Nicolson scheme, so we can be sure that now everything is alright. One might wonder whether it is a problem that the numerical viscosity now is not evaluated at time $n$ (as opposed to the rest of the equation). However, this offset is uncritical: For smooth solutions, $\nu_\alpha$ will vary continuously, so the error in time offset is $k$ times smaller than the nonlinear viscosity itself, i.e., it is a small higher order contribution that is left out. That's fine because the term itself is already at the level of discretization error in smooth regions.

    Using the BDF-2 scheme introduced above, this yields for the simpler case of uniform time steps of size $k$:

    \begin{eqnarray*}
   \frac 32 T^n
   -
   k\nabla \cdot \kappa \nabla T^n
   &=&
   2T^{n-1}
   -
   \frac 12 T^{n-2}
   -
   k(2{\mathbf u}^{n-1}-{\mathbf u}^{n-2})\cdot\nabla(2T^{n-1}-T^{n-2})
   \\
   &&
   +
   k\gamma
   +
   k\nabla \cdot
   \nu_\alpha\left(\frac 12 T^{n-1}+\frac 12 T^{n-2}\right)
   \nabla(2T^{n-1}-T^{n-2}).
 \end{eqnarray*}

    Linear solvers

    Like in step-22, we will solve the Stokes part of this system with a Krylov subspace method (here: GMRES), preconditioned by a block matrix built from approximations of the velocity matrix $A$ and of the Schur complement $S=B A^{-1} B^T$,

    \begin{eqnarray*}
   P^{-1}
   =
   \left(\begin{array}{cc}
     \tilde A^{-1} & 0 \\
     \tilde S^{-1} B \tilde A^{-1} & -\tilde S^{-1}
   \end{array}\right),
 \end{eqnarray*}

    where $\tilde A^{-1},\tilde S^{-1}$ are approximations to the inverse matrices. In particular, it turned out that $S$ is spectrally equivalent to the mass matrix and consequently replacing $\tilde S^{-1}$ by a CG solver applied to the mass matrix on the pressure space was a good choice. In a small deviation from step-22, we here have a coefficient $\eta$ in the momentum equation, and by the same derivation as there we should arrive at the conclusion that it is the weighted mass matrix with entries $\tilde S_{ij}=(\eta^{-1}\varphi_i,\varphi_j)$ that we should be using.

    It was more complicated to come up with a good replacement $\tilde A^{-1}$, which corresponds to the discretized symmetric Laplacian of the vector-valued velocity field, i.e. $A_{ij} = (\varepsilon {\mathbf v}_i, 2\eta \varepsilon ({\mathbf v}_j))$. In step-22 we used a sparse LU decomposition (using the SparseDirectUMFPACK class) of $A$ for $\tilde A^{-1}$ — the perfect preconditioner — in 2d, but for 3d, memory and compute time are usually not sufficient to actually compute this decomposition; consequently, we only use an incomplete LU decomposition (ILU, using the SparseILU class) in 3d.

    For this program, we would like to go a bit further. To this end, note that the symmetrized bilinear form on vector fields, $(\varepsilon {\mathbf v}_i, 2 \eta \varepsilon ({\mathbf v}_j))$ is not too far away from the nonsymmetrized version, $(\nabla {\mathbf v}_i, \eta \nabla {\mathbf v}_j) = \sum_{k,l=1}^d (\partial_k ({\mathbf v}_i)_l, \eta \partial_k ({\mathbf v}_j)_l)$ (note that the factor 2 has disappeared in this form). The latter, however, has the advantage that the dim vector components of the test functions are not coupled (well, almost, see below), i.e., the resulting matrix is block-diagonal: one block for each vector component, and each of these blocks is equal to the Laplace matrix for this vector component. So assuming we order degrees of freedom in such a way that first all $x$-components of the velocity are numbered, then the $y$-components, and then the $z$-components, then the matrix $\hat A$ that is associated with this slightly different bilinear form has the form

    \begin{eqnarray*}
   \hat A =
   \left(\begin{array}{ccc}
     A_s & 0 & 0 \\
     0 & A_s & 0 \\
     0 & 0 & A_s
   \end{array}\right),
 \end{eqnarray*}

    where $A_s$ is a Laplace matrix that couples only the degrees of freedom of a single velocity component. Preconditioners for $\hat A$ are much cheaper to build and apply than for the fully coupled matrix $A$, while still providing a good approximation $\tilde A^{-1}$.

    A different question is how to arrange the various variables. If we put velocity, pressure, and temperature into a single linear system, the matrix would have the overall block form

    \begin{eqnarray*}
   \left(\begin{array}{ccc}
     A & B^T & 0 \\
     B & 0  & 0 \\
     C & 0  & K
   \end{array}\right)
   \left(\begin{array}{c}
     U^n \\ P^n \\ T^n
   \end{array}\right)
   =
   \left(\begin{array}{c}
     F_U \\ 0 \\ F_T
   \end{array}\right).
 \end{eqnarray*}
    The problem with this is: We never use the whole matrix at the same time. In fact, it never really exists at the same time: As explained above, $K$ and $F_T$ depend on the already computed solution $U^n$, in the first case through the time step (that depends on $U^n$ because it has to satisfy a CFL condition). So we can only assemble it once we've already solved the top left $2\times 2$ block Stokes system, and once we've moved on to the temperature equation we don't need the Stokes part any more; the fact that we build an object for a matrix that never exists as a whole in memory at any given time led us to jumping through some hoops in step-21, so let's not repeat this sort of error. Furthermore, we don't actually build the matrix $C$: Because by the time we get to the temperature equation we already know $U^n$, and because we have to assemble the right hand side $F_T$ at this time anyway, we simply move the term $CU^n$ to the right hand side and assemble it along with all the other terms there. What this means is that there does not remain a part of the matrix where temperature variables and Stokes variables couple, and so a global enumeration of all degrees of freedom is no longer important: It is enough if we have an enumeration of all Stokes degrees of freedom, and of all temperature degrees of freedom independently.

    In essence, there is consequently not much use in putting everything into a block matrix (though there are of course the same good reasons to do so for the $2\times 2$ Stokes part), or, for that matter, in putting everything into the same DoFHandler object.

    But are there downsides to doing so? These exist, though they may not be obvious at first. The main problem is that we need to create one global finite element that contains velocity, pressure, and temperature shape functions, and use it to initialize the DoFHandler. But we also use this finite element object to initialize all FEValues or FEFaceValues objects that we use. This may not appear to be that big a deal, but imagine what happens when, for example, we evaluate the residual

    \begin{eqnarray*}
   R_\alpha(T)
   =
   \frac 1\alpha
   \frac{\partial T^\alpha}{\partial t}
   +
   \frac 1\alpha
   {\mathbf u} \cdot \nabla T^\alpha
   -
   \frac 1\alpha
   \nabla \cdot \kappa \nabla T^\alpha
   +
   \kappa(\alpha-1)
   T^{\alpha-2} |\nabla T|^2
   -
   \gamma
   T^{\alpha-1}
 \end{eqnarray*}

    at the quadrature points of a cell: the FEValues object then has to compute values and derivatives for all components of the combined element, including the many we do not actually need at this place. Keeping the Stokes variables and the temperature in two separate DoFHandler objects avoids this overhead, and this is the route this program takes.

    For the Stokes preconditioner, consider what would happen if we used the exact inverses, $\tilde A^{-1}=A^{-1}$ and $\tilde S^{-1}=S^{-1}$: the preconditioned Stokes matrix then becomes

    \begin{eqnarray*}
   P^{-1}
   \left(\begin{array}{cc}
     A & B^T \\ B & 0
   \end{array}\right)
   =
   \left(\begin{array}{cc}
     I & A^{-1} B^T \\ 0 & I
   \end{array}\right),
 \end{eqnarray*}

    which indeed is very simple. A GMRES solver based on exact matrices would converge in one iteration, since all eigenvalues are equal (any Krylov method takes at most as many iterations as there are distinct eigenvalues). Such a preconditioner for the blocked Stokes system has been proposed by Silvester and Wathen.

    Replacing $P$ by $\tilde{P}$ keeps that spirit alive: the product $P^{-1} A$ will still be close to a matrix with eigenvalues 1 with a distribution that does not depend on the problem size. This lets us hope to be able to get a number of GMRES iterations that is problem-size independent.

    The deal.II users who have already gone through the step-20 and step-22 tutorials can certainly imagine how we're going to implement this. We replace the exact inverse matrices in $P^{-1}$ by some approximate inverses built from the InverseMatrix class, and the inverse Schur complement will be approximated by the pressure mass matrix $M_p$ (weighted by $\eta^{-1}$ as mentioned in the introduction). As pointed out in the results section of step-22, we can replace the exact inverse of $A$ by just the application of a preconditioner, in this case on a vector Laplace matrix as was explained in the introduction. This does increase the number of (outer) GMRES iterations, but is still significantly cheaper than an exact inverse, which would require between 20 and 35 CG iterations for each outer solver step (using the AMG preconditioner).
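    In code, the two approximations might be built along the following lines. This is only a sketch; the matrix names are assumptions based on the description above (an AMG preconditioner for the vector Laplace block, an IC preconditioner for the $\eta^{-1}$-weighted pressure mass matrix):

      TrilinosWrappers::PreconditionAMG Amg_preconditioner;
      Amg_preconditioner.initialize(stokes_preconditioner_matrix.block(0, 0));

      TrilinosWrappers::PreconditionIC Mp_preconditioner;
      Mp_preconditioner.initialize(stokes_preconditioner_matrix.block(1, 1));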

    Having the above explanations in mind, we define a preconditioner class with a vmult functionality, which is all we need for the interaction with the usual solver functions further below in the program code.

    First the declarations. These are similar to the definition of the Schur complement in step-20, with the difference that we need some more preconditioners in the constructor and that the matrices we use here are built upon Trilinos:

      template <class PreconditionerTypeA, class PreconditionerTypeMp>
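      // The body of this declaration is elided here; what follows is a
      // minimal sketch consistent with the vmult() function below (the
      // member names are assumptions):
      class BlockSchurPreconditioner : public Subscriptor
      {
      public:
        BlockSchurPreconditioner(
          const TrilinosWrappers::BlockSparseMatrix &S,
          const InverseMatrix<TrilinosWrappers::SparseMatrix,
                              PreconditionerTypeMp> &Mpinv,
          const PreconditionerTypeA &               Apreconditioner);

        void vmult(TrilinosWrappers::MPI::BlockVector &      dst,
                   const TrilinosWrappers::MPI::BlockVector &src) const;

      private:
        const SmartPointer<const TrilinosWrappers::BlockSparseMatrix>
          stokes_matrix;
        const SmartPointer<const InverseMatrix<TrilinosWrappers::SparseMatrix,
                                               PreconditionerTypeMp>>
          m_inverse;
        const PreconditionerTypeA &a_preconditioner;

        mutable TrilinosWrappers::MPI::Vector tmp;
      };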
    Next is the vmult function. We implement the action of $P^{-1}$ as described above in three successive steps. In formulas, we want to compute $Y=P^{-1}X$ where $X,Y$ are both vectors with two block components.

    The first step multiplies the velocity part of the vector by a preconditioner of the matrix $A$, i.e., we compute $Y_0={\tilde A}^{-1}X_0$. The resulting velocity vector is then multiplied by $B$ and subtracted from the pressure, i.e., we want to compute $X_1-BY_0$. This second step only acts on the pressure vector and is accomplished by the residual function of our matrix classes, except that the sign is wrong. Consequently, we change the sign in the temporary pressure vector and finally multiply by the inverse pressure mass matrix to get the final pressure vector, completing our work on the Stokes preconditioner:

      template <class PreconditionerTypeA, class PreconditionerTypeMp>
      void
      BlockSchurPreconditioner<PreconditionerTypeA, PreconditionerTypeMp>::vmult(
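        TrilinosWrappers::MPI::BlockVector &      dst,
        const TrilinosWrappers::MPI::BlockVector &src) const
      {
        // The body is elided here; a sketch of the three steps described
        // above (member names as in the declaration sketched earlier):
        a_preconditioner.vmult(dst.block(0), src.block(0)); // Y_0 = A~^{-1} X_0
        stokes_matrix->block(1, 0).residual(tmp,
                                            dst.block(0),
                                            src.block(1)); // tmp = X_1 - B Y_0
        tmp *= -1;                                         // fix the sign
        m_inverse->vmult(dst.block(1), tmp); // Y_1 = S~^{-1} (B Y_0 - X_1)
      }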

    BoussinesqFlowProblem class implementation

    BoussinesqFlowProblem::BoussinesqFlowProblem

    The constructor of this class is an extension of the constructor in step-22. We need to add the various variables that concern the temperature. As discussed in the introduction, we are going to use $Q_2^d\times Q_1$ (Taylor-Hood) elements again for the Stokes part, and $Q_2$ elements for the temperature. However, by using variables that store the polynomial degree of the Stokes and temperature finite elements, it is easy to consistently modify the degree of the elements as well as all quadrature formulas used on them downstream. Moreover, we initialize the time stepping as well as the options for matrix assembly and preconditioning:

      template <int dim>
      BoussinesqFlowProblem<dim>::BoussinesqFlowProblem()
      : triangulation(Triangulation<dim>::maximum_smoothing)
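      // The rest of the member initializer list is elided here; a sketch
      // consistent with the description above (member names are assumptions;
      // note how the element degrees are stored in variables so they can be
      // changed consistently):
      , stokes_degree(1)
      , stokes_fe(FE_Q<dim>(stokes_degree + 1), dim, // Q_2^dim velocities
                  FE_Q<dim>(stokes_degree), 1)       // Q_1 pressure
      , stokes_dof_handler(triangulation)
      , temperature_degree(2)
      , temperature_fe(temperature_degree)           // Q_2 temperature
      , temperature_dof_handler(triangulation)
      , time_step(0)
      , old_time_step(0)
      , timestep_number(0)
      , rebuild_stokes_matrix(true)
      , rebuild_temperature_matrices(true)
      , rebuild_stokes_preconditioner(true)
      {}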

    BoussinesqFlowProblem::get_extrapolated_temperature_range

    Next a function that determines the minimum and maximum temperature at quadrature points inside $\Omega$ when extrapolated from the two previous time steps to the current one. We need this information in the computation of the artificial viscosity parameter $\nu$ as discussed in the introduction.

    The formula for the extrapolated temperature is $\left(1+\frac{k_n}{k_{n-1}}\right)T^{n-1} - \frac{k_n}{k_{n-1}} T^{n-2}$, the same extrapolation ${(\cdot)}^{*,n}$ as in the introduction. The way to compute it is to loop over all quadrature points and update the maximum and minimum value if the current value is bigger/smaller than the previous one. We initialize the variables that store the max and min before the loop over all quadrature points by the smallest and the largest number representable as a double. Then we know for a fact that it is larger/smaller than the minimum/maximum and that the loop over all quadrature points is ultimately going to update the initial value with the correct one.

    The only other complication worth mentioning here is that in the first time step, $T^{n-2}$ is not yet available of course. In that case, we can only use $T^{n-1}$, which we have from the initial temperature. As quadrature points, we use the same choice as in the previous function, though with the difference that now the number of repetitions is determined by the polynomial degree of the temperature field.
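    In essence, the loop just described might look like the following sketch (names are assumptions, and the special handling of the first time step is omitted):

      double min_temperature = std::numeric_limits<double>::max(),
             max_temperature = -std::numeric_limits<double>::max();

      const unsigned int  n_q_points = fe_values.n_quadrature_points;
      std::vector<double> old_values(n_q_points), old_old_values(n_q_points);

      for (const auto &cell : temperature_dof_handler.active_cell_iterators())
        {
          fe_values.reinit(cell);
          fe_values.get_function_values(old_temperature_solution, old_values);
          fe_values.get_function_values(old_old_temperature_solution,
                                        old_old_values);
          for (unsigned int q = 0; q < n_q_points; ++q)
            {
              // extrapolate to the current time level:
              const double T =
                (1. + time_step / old_time_step) * old_values[q] -
                time_step / old_time_step * old_old_values[q];
              min_temperature = std::min(min_temperature, T);
              max_temperature = std::max(max_temperature, T);
            }
        }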


    BoussinesqFlowProblem::compute_viscosity

    The last of the tool functions computes the artificial viscosity parameter $\nu|_K$ on a cell $K$ as a function of the extrapolated temperature, its gradient and Hessian (second derivatives), the velocity, the right hand side $\gamma$, all evaluated at the quadrature points of the current cell, and various other parameters as described in detail in the introduction.

    There are some universal constants worth mentioning here. First, we need to fix $\beta$; we choose $\beta=0.017\cdot dim$, a choice discussed in detail in the results section of this tutorial program. The second is the exponent $\alpha$; $\alpha=1$ appears to work fine for the current program, even though some additional benefit might be expected from choosing $\alpha = 2$. Finally, there is one thing that requires special casing: In the first time step, the velocity equals zero, and the formula for $\nu|_K$ is not defined. In that case, we return $\nu|_K=5\cdot 10^3 \cdot h_K$, a choice admittedly more motivated by heuristics than anything else (it is of the same order of magnitude, however, as the value returned for most cells on the second time step).

    The rest of the function should be mostly obvious based on the material discussed in the introduction:

      template <int dim>
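      // The signature and body are elided here; what follows is a sketch
      // under the assumptions that the extrapolated temperature data,
      // velocities, and right hand side values at the quadrature points are
      // passed in, and that old_time_step, global_Omega_diameter, the
      // constant c_R, and EquationData::kappa are available as class members
      // or constants (alpha = 1, as discussed above):
      double BoussinesqFlowProblem<dim>::compute_viscosity(
        const std::vector<double> &        old_temperature,
        const std::vector<double> &        old_old_temperature,
        const std::vector<Tensor<1, dim>> &old_temperature_grads,
        const std::vector<Tensor<1, dim>> &old_old_temperature_grads,
        const std::vector<double> &        old_temperature_laplacians,
        const std::vector<double> &        old_old_temperature_laplacians,
        const std::vector<Tensor<1, dim>> &old_velocity_values,
        const std::vector<Tensor<1, dim>> &old_old_velocity_values,
        const std::vector<double> &        gamma_values,
        const double                       global_u_infty,
        const double                       global_T_variation,
        const double                       cell_diameter) const
      {
        const double beta  = 0.017 * dim;
        const double alpha = 1.;

        if (global_u_infty == 0) // first time step: the velocity is zero
          return 5e3 * cell_diameter;

        // max |u| and max |R_alpha(T)| over the quadrature points of this
        // cell, with all quantities averaged to the intermediate time
        // t_{n-3/2} as described in the introduction:
        double max_residual = 0;
        double max_velocity = 0;
        for (unsigned int q = 0; q < old_temperature.size(); ++q)
          {
            const Tensor<1, dim> u =
              (old_velocity_values[q] + old_old_velocity_values[q]) / 2;
            const Tensor<1, dim> grad_T =
              (old_temperature_grads[q] + old_old_temperature_grads[q]) / 2;
            const double laplacian_T = (old_temperature_laplacians[q] +
                                        old_old_temperature_laplacians[q]) /
                                       2;
            const double dT_dt =
              (old_temperature[q] - old_old_temperature[q]) / old_time_step;

            // the T^{alpha-1} factor equals one for alpha = 1:
            const double residual =
              std::abs(dT_dt + u * grad_T -
                       EquationData::kappa * laplacian_T - gamma_values[q]);

            max_residual = std::max(residual, max_residual);
            max_velocity = std::max(std::sqrt(u * u), max_velocity);
          }

        // c(u,T) = c_R ||u||_infty var(T) |diam(Omega)|^{alpha-2}:
        const double c = c_R * global_u_infty * global_T_variation *
                         std::pow(global_Omega_diameter, alpha - 2.);

        return beta * max_velocity *
               std::min(cell_diameter,
                        std::pow(cell_diameter, alpha) * max_residual / c);
      }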

    BoussinesqFlowProblem::setup_dofs

    This is the function that sets up the DoFHandler objects we have here (one for the Stokes part and one for the temperature part), as well as sets the various objects required for the linear algebra in this program to their correct sizes. Its basic operations are similar to what we do in step-22.

    The body of the function first enumerates all degrees of freedom for the Stokes and temperature systems. For the Stokes part, degrees of freedom are then sorted to ensure that velocities precede pressure DoFs so that we can partition the Stokes matrix into a $2\times 2$ matrix. As a difference to step-22, we do not perform any additional DoF renumbering. In that program, it paid off since our solver was heavily dependent on ILUs, whereas we use AMG here which is not sensitive to the DoF numbering. The IC preconditioner for the inversion of the pressure mass matrix would of course take advantage of a Cuthill-McKee like renumbering, but its costs are low compared to the velocity portion, so the additional work does not pay off.

    We then proceed with the generation of the hanging node constraints that arise from adaptive grid refinement for both DoFHandler objects. For the velocity, we impose no-flux boundary conditions $\mathbf{u}\cdot\mathbf{n}=0$ by adding constraints to the object that already stores the hanging node constraints matrix. The second parameter in the function describes the first of the velocity components in the total dof vector, which is zero here. The variable no_normal_flux_boundaries denotes the boundary indicators for which to set the no flux boundary conditions; here, this is boundary indicator zero.
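    In deal.II, this is the job of VectorTools::compute_no_normal_flux_constraints; a sketch of the call just described (object names are assumptions):

      std::set<types::boundary_id> no_normal_flux_boundaries;
      no_normal_flux_boundaries.insert(0);
      VectorTools::compute_no_normal_flux_constraints(
        stokes_dof_handler,
        0, // first velocity component
        no_normal_flux_boundaries,
        stokes_constraints);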

    After having done so, we count the number of degrees of freedom in the various blocks:

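    In deal.II, the enumeration and counting are done with DoFRenumbering::component_wise and DoFTools::count_dofs_per_fe_block; in essence (a sketch, with variable names as assumptions):

      std::vector<unsigned int> stokes_sub_blocks(dim + 1, 0);
      stokes_sub_blocks[dim] = 1; // the pressure forms its own block
      DoFRenumbering::component_wise(stokes_dof_handler, stokes_sub_blocks);

      const std::vector<types::global_dof_index> stokes_dofs_per_block =
        DoFTools::count_dofs_per_fe_block(stokes_dof_handler,
                                          stokes_sub_blocks);
      const types::global_dof_index n_u = stokes_dofs_per_block[0],
                                    n_p = stokes_dofs_per_block[1];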

    The next step is to create the sparsity pattern for the Stokes and temperature system matrices as well as the preconditioner matrix from which we build the Stokes preconditioner. As in step-22, we choose to create the pattern by using the blocked version of DynamicSparsityPattern.

    So, we first release the memory stored in the matrices, then set up an object of type BlockDynamicSparsityPattern consisting of $2\times 2$ blocks (for the Stokes system matrix and preconditioner) or DynamicSparsityPattern (for the temperature part). We then fill these objects with the nonzero pattern, taking into account that for the Stokes system matrix, there are no entries in the pressure-pressure block (but all velocity vector components couple with each other and with the pressure). Similarly, in the Stokes preconditioner matrix, only the diagonal blocks are nonzero, since we use the vector Laplacian as discussed in the introduction. This operator only couples each vector component of the Laplacian with itself, but not with the other vector components. (Application of the constraints resulting from the no-flux boundary conditions will couple vector components at the boundary again, however.)

    When generating the sparsity pattern, we directly apply the constraints from hanging nodes and no-flux boundary conditions. This approach was already used in step-27, but is different from the one in early tutorial programs where we first built the original sparsity pattern and only then added the entries resulting from constraints. The reason for doing so is that later during assembly we are going to distribute the constraints immediately when transferring local to global dofs. Consequently, there will be no data written at positions of constrained degrees of freedom, so we can let the DoFTools::make_sparsity_pattern function omit these entries by setting the last Boolean flag to false. Once the sparsity pattern is ready, we can use it to initialize the Trilinos matrices. Since the Trilinos matrices store the sparsity pattern internally, there is no need to keep the sparsity pattern around after the initialization of the matrix.
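    For the temperature part, this might look as follows (a sketch; object names are assumptions, and the Stokes part works analogously with BlockDynamicSparsityPattern):

      DynamicSparsityPattern dsp(temperature_dof_handler.n_dofs());
      DoFTools::make_sparsity_pattern(temperature_dof_handler,
                                      dsp,
                                      temperature_constraints,
                                      /*keep_constrained_dofs =*/false);
      temperature_matrix.reinit(dsp);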

      stokes_partitioning.resize(2);
      stokes_partitioning[0] = complete_index_set(n_u);
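      stokes_partitioning[1] = complete_index_set(n_p);
      // (The reinitialization of the Trilinos matrices and vectors from
      // these index sets is elided here; the document then jumps into the
      // solve() function, from which the following fragment is taken.)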
      temperature_solution = old_temperature_solution;
     
    Next we set up the temperature system and the right hand side using the function assemble_temperature_system(). Knowing the matrix and right hand side of the temperature equation, we set up a preconditioner and a solver. The temperature matrix is a mass matrix (with eigenvalues around one) plus a Laplace matrix (with eigenvalues between zero and $ch^{-2}$) times a small number proportional to the time step $k_n$. Hence, the resulting symmetric and positive definite matrix has eigenvalues in the range $[1,1+k_nh^{-2}]$ (up to constants). This matrix is only moderately ill conditioned even for small mesh sizes and we get a reasonably good preconditioner by simple means, for example with an incomplete Cholesky decomposition preconditioner (IC) as we also use for preconditioning the pressure mass matrix solver. As a solver, we choose the conjugate gradient method CG. As before, we tell the solver to use Trilinos vectors via the template argument TrilinosWrappers::MPI::Vector. Finally, we solve, distribute the hanging node constraints and write out the number of iterations.

      assemble_temperature_system(maximal_velocity);
      {
    SolverControl solver_control(temperature_matrix.m(),
                                 1e-8 * temperature_rhs.l2_norm()); // tolerance value assumed
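      // The remainder of this block is elided here; a sketch consistent
      // with the description above (IC preconditioning plus CG; object
      // names are assumptions):
      SolverCG<TrilinosWrappers::MPI::Vector> cg(solver_control);

      TrilinosWrappers::PreconditionIC preconditioner;
      preconditioner.initialize(temperature_matrix);

      cg.solve(temperature_matrix,
               temperature_solution,
               temperature_rhs,
               preconditioner);

      temperature_constraints.distribute(temperature_solution);

      std::cout << "   " << solver_control.last_step()
                << " CG iterations for temperature." << std::endl;
      }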
    A remark on the constant $c_R$ in the stabilization parameter: in the computations whose results are shown below, we had originally used the formula $c(\mathbf{u},T) = c_R\ \|\mathbf{u}\|_{L^\infty(\Omega)} \ \mathrm{var}(T) \ \frac{1}{|\mathrm{diam}(\Omega)|^{\alpha-2}}$ instead, where we had set the scaling parameter to one. Since we only computed on the unit square/cube where $\mathrm{diam}(\Omega)=2^{1/d}$, this was entirely equivalent to using the correct formula with $c_R=\left(2^{1/d}\right)^{4-2\alpha}=2^{\frac{4-2\alpha}{d}}$. Since this value for $c_R$ appears to work just fine for the current program, we corrected the formula in the program and set $c_R$ to a value that reproduces exactly the results we had before. We will, however, revisit this issue again in step-32.

    Now, however, back to the discussion of what values of $c_k$ and $\beta$ to choose:

    Choosing $c_k$ and $\beta$

    These two constants are definitely linked in some way. The reason is easy to see: In the case of a pure advection problem, $\frac{\partial T}{\partial t} + \mathbf{u}\cdot\nabla T = \gamma$, any explicit scheme has to satisfy a CFL condition of the form $k\le \min_K \frac{c_k^a h_K}{\|\mathbf{u}\|_{L^\infty(K)}}$. On the other hand, for a pure diffusion problem, $\frac{\partial T}{\partial t} + \nu \Delta T = \gamma$, explicit schemes need to satisfy a condition $k\le \min_K \frac{c_k^d h_K^2}{\nu}$. So given the form of $\nu$ above, an advection diffusion problem like the one we have to solve here will result in a condition of the form

    \begin{eqnarray*}
   k\le \min_K \min \left\{
     \frac{c_k^a h_K}{\|\mathbf{u}\|_{L^\infty(K)}},
     \frac{c_k^d h_K^2}{\beta \|\mathbf{u}\|_{L^\infty(K)} h_K}\right\}
   =
   \min_K \left( c_k^a, \frac{c_k^d}{\beta} \right)
   \frac{h_K}{\|\mathbf{u}\|_{L^\infty(K)}}.
 \end{eqnarray*}

    Again, small values of $\beta$ lead to less diffusion but we have to choose the time step very small to keep things under control. Too large values of $\beta$ make for more diffusion, but again require small time steps. The best value would appear to be $\beta=0.03$, as for the $Q_1$ element, and then we have to choose $k=\frac 18\frac{h_K}{\|\mathbf{u}\|_{L^\infty(K)}}$ — exactly half the size for the $Q_1$ element, a fact that may not be surprising if we state the CFL condition as the requirement that the time step be small enough so that the distance transport advects in each time step is no longer than one grid point away (which for $Q_1$ elements is $h_K$, but for $Q_2$ elements is $h_K/2$). It turns out that $\beta$ needs to be slightly larger for obtaining stable results also late in the simulation at times larger than 60, so we actually choose it as $\beta = 0.034$ in the code.

    Results for 3d
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_32.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    In addition to these changes, we also use a slightly different preconditioner, and we will have to make a number of changes that have to do with the fact that we want to solve a realistic problem here, not a model problem. The latter, in particular, will require that we think about scaling issues as well as what all those parameters and coefficients in the equations under consideration actually mean. We will discuss first the issues that affect changes in the mathematical formulation and solver structure, then how to parallelize things, and finally the actual testcase we will consider.

    Using the "right" pressure

    In step-31, we used the following Stokes model for the velocity and pressure field:

    \begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   -\rho \; \beta \; T \mathbf{g},
   \\
   \nabla \cdot {\mathbf u} &=& 0.
 \end{eqnarray*}

    The right hand side of the first equation appears a wee bit unmotivated. Here's how things should really be. We need the external forces that act on the fluid, which we assume are given by gravity only. In the current case, we assume that the fluid does expand slightly for the purposes of this gravity force, but not enough that we need to modify the incompressibility condition (the second equation). What this means is that for the purpose of the right hand side, we can assume that $\rho=\rho(T)$. An assumption that may not be entirely justified is that we can assume that the changes of density as a function of temperature are small, leading to an expression of the form $\rho(T) = \rho_{\text{ref}} [1-\beta(T-T_{\text{ref}})]$, i.e., the density equals $\rho_{\text{ref}}$ at reference temperature and decreases linearly as the temperature increases (as the material expands). The force balance equation then looks properly written like this:

    \begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   \rho_{\text{ref}} [1-\beta(T-T_{\text{ref}})] \mathbf{g}.
 \end{eqnarray*}

    Now note that the gravity force results from a gravity potential as $\mathbf g=-\nabla \varphi$, so that we can re-write this as follows:

    \begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   -\rho_{\text{ref}} \; \beta\; T\; \mathbf{g}
   -\rho_{\text{ref}} [1+\beta T_{\text{ref}}] \nabla\varphi.
 \end{eqnarray*}

    The second term on the right is time independent, and so we could introduce a new "dynamic" pressure $p_{\text{dyn}}=p+\rho_{\text{ref}} [1+\beta T_{\text{ref}}] \varphi=p_{\text{total}}-p_{\text{static}}$ with which the Stokes equations would read:

    \begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p_{\text{dyn}} &=&
   -\rho_{\text{ref}} \; \beta \; T \; \mathbf{g},
   \\
   \nabla \cdot {\mathbf u} &=& 0.
 \end{eqnarray*}

    This is exactly the form we used in step-31, and it was appropriate to do so because all changes in the fluid flow are only driven by the dynamic pressure that results from temperature differences. (In other words: Any contribution to the right hand side that results from taking the gradient of a scalar field has no effect on the velocity field.)

    On the other hand, we will here use the form of the Stokes equations that considers the total pressure instead:

    \begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   \rho(T)\; \mathbf{g},
   \\
   \nabla \cdot {\mathbf u} &=& 0.
 \end{eqnarray*}

    There are several advantages to this:

    • This way we can plot the pressure in our program in such a way that it actually shows the total pressure that includes the effects of temperature differences as well as the static pressure of the overlying rocks. Since the pressure does not appear any further in any of the other equations, whether to use one or the other is more a matter of taste than of correctness. The flow field is exactly the same, but we get a pressure that we can now compare with values that are given in geophysical books as those that hold at the bottom of the earth mantle, for example.
    • If we wanted to make the model even more realistic, we would have to take into account that many of the material parameters (e.g. the viscosity, the density, etc) not only depend on the temperature but also the total pressure.
    • The model above assumed a linear dependence $\rho(T) = \rho_{\text{ref}} [1-\beta(T-T_{\text{ref}})]$ and assumed that $\beta$ is small. In practice, this may not be so. In fact, realistic models are certainly not linear, and $\beta$ may also not be small for at least part of the temperature range because the density's behavior is substantially dependent not only on thermal expansion but also on phase changes.
    • A final reason to do this is discussed in the results section and concerns possible extensions to the model we use here. It has to do with the fact that the temperature equation (see below) we use here does not include a term that contains the pressure. It should, however: rock, like gas, heats up as you compress it. Consequently, material that rises up cools adiabatically, and cold material that sinks down heats adiabatically. We discuss this further below.
    Note
    There is, however, a downside to this procedure. In the earth, the dynamic pressure is several orders of magnitude smaller than the total pressure. If we use the equations above and solve all variables to, say, 4 digits of accuracy, then we may be able to get the velocity and the total pressure right, but we will have no accuracy at all if we compute the dynamic pressure by subtracting from the total pressure the static part $p_\text{static}=\rho_{\text{ref}} [1+\beta T_{\text{ref}}] \varphi$. If, for example, the dynamic pressure is six orders of magnitude smaller than the static pressure, then we need to solve the overall pressure to at least seven digits of accuracy to get anything remotely accurate. That said, in practice this turns out not to be a limiting factor.

    The scaling of discretized equations

    Remember that we want to solve the following set of equations:

    \begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   \rho(T) \mathbf{g},
   \\
   \nabla \cdot {\mathbf u} &=& 0,
   \\
   \frac{\partial T}{\partial t}
   +
   {\mathbf u} \cdot \nabla T
   -
   \nabla \cdot \kappa \nabla T &=& \gamma,
 \end{eqnarray*}

    augmented by appropriate boundary and initial conditions. As discussed in step-31, we will solve this set of equations by solving for a Stokes problem first in each time step, and then moving the temperature equation forward by one time interval.

    The problem under consideration in this section is the Stokes problem: if we discretize it as usual, we get a linear system

    \begin{eqnarray*}
   M \; X
   =
   \left(\begin{array}{cc}
     A & B \\ B^T & 0
   \end{array}\right)
   \left(\begin{array}{c}
     U \\ P
   \end{array}\right)
   =
   F
 \end{eqnarray*}

    which in this program we will solve with an FGMRES solver. This solver iterates until the residual of these linear equations is below a certain tolerance, i.e., until

    \[
   \left\|
   \left(\begin{array}{c}
     F_U - A U^{(k)} - B P^{(k)}
     \\
     F_P - B^T U^{(k)}
   \end{array}\right)
   \right\|
   < \text{Tol}.
 \]

    -

    This does not make any sense from the viewpoint of physical units: the quantities involved here have physical units so that the first part of the residual has units $\frac{\text{Pa}}{\text{m}}
-\text{m}^{\text{dim}}$ (most easily established by considering the term $(\nabla \cdot \mathbf v, p)_{\Omega}$ and considering that the pressure has units $\text{Pa}=\frac{\text{kg}}{\text{m}\;\text{s}^2}$ and the integration yields a factor of $\text{m}^{\text{dim}}$), whereas the second part of the residual has units $\frac{\text{m}^{\text{dim}}}{\text{s}}$. Taking the norm of this residual vector would yield a quantity with units $\text{m}^{\text{dim}-1} \sqrt{\left(\text{Pa}\right)^2 +
-       \left(\frac{\text{m}}{\text{s}}\right)^2}$. This, quite obviously, does not make sense, and we should not be surprised that doing so is eventually going to come back hurting us.

    -

    So why is this an issue here, but not in step-31? The reason back there is that everything was nicely balanced: velocities were on the order of one, the pressure likewise, the viscosity was one, and the domain had a diameter of $\sqrt{2}$. As a result, while nonsensical, nothing bad happened. On the other hand, as we will explain below, things here will not be that simply scaled: $\eta$ will be around $10^{21}$, velocities on the order of $10^{-8}$, pressure around $10^8$, and the diameter of the domain is $10^7$. In other words, the order of magnitude for the first equation is going to be $\eta\text{div}\varepsilon(\mathbf u) \approx 10^{21} \frac{10^{-8}}{(10^7)^2}
-\approx 10^{-1}$, whereas the second equation will be around $\text{div}{\mathbf u}\approx \frac{10^{-8}}{10^7} \approx 10^{-15}$. Well, so what this will lead to is this: if the solver wants to make the residual small, it will almost entirely focus on the first set of equations because they are so much bigger, and ignore the divergence equation that describes mass conservation. That's exactly what happens: unless we set the tolerance to extremely small values, the resulting flow field is definitely not divergence free. As an auxiliary problem, it turns out that it is difficult to find a tolerance that always works; in practice, one often ends up with a tolerance that requires 30 or 40 iterations for most time steps, and 10,000 for some others.

    -

    So what's a numerical analyst to do in a case like this? The answer is to start at the root and first make sure that everything is mathematically consistent first. In our case, this means that if we want to solve the system of Stokes equations jointly, we have to scale them so that they all have the same physical dimensions. In our case, this means multiplying the second equation by something that has units $\frac{\text{Pa}\;\text{s}}{\text{m}}$; one choice is to multiply with $\frac{\eta}{L}$ where $L$ is a typical lengthscale in our domain (which experiments show is best chosen to be the diameter of plumes — around 10 km — rather than the diameter of the domain). Using these numbers for $\eta$ and $L$, this factor is around $10^{17}$. So, we now get this for the Stokes system:

    -\begin{eqnarray*}
+<p> This does not make any sense from the viewpoint of physical units: the quantities involved here have physical units so that the first part of the residual has units  <picture><source srcset=$\frac{\text{Pa}}{\text{m}}
+\text{m}^{\text{dim}}$ (most easily established by considering the term $(\nabla \cdot \mathbf v, p)_{\Omega}$ and considering that the pressure has units $\text{Pa}=\frac{\text{kg}}{\text{m}\;\text{s}^2}$ and the integration yields a factor of $\text{m}^{\text{dim}}$), whereas the second part of the residual has units $\frac{\text{m}^{\text{dim}}}{\text{s}}$. Taking the norm of this residual vector would yield a quantity with units $\text{m}^{\text{dim}-1} \sqrt{\left(\text{Pa}\right)^2 +
+       \left(\frac{\text{m}}{\text{s}}\right)^2}$. This, quite obviously, does not make sense, and we should not be surprised that doing so is eventually going to come back hurting us.

    +

    So why is this an issue here, but not in step-31? The reason it was not a problem back there is that everything was nicely balanced: velocities were on the order of one, the pressure likewise, the viscosity was one, and the domain had a diameter of $\sqrt{2}$. As a result, while nonsensical, nothing bad happened. Here, on the other hand, as we will explain below, things will not be that conveniently scaled: $\eta$ will be around $10^{21}$, velocities on the order of $10^{-8}$, the pressure around $10^8$, and the diameter of the domain $10^7$. In other words, the order of magnitude of the first equation is going to be $\eta\text{div}\varepsilon(\mathbf u) \approx 10^{21} \frac{10^{-8}}{(10^7)^2} \approx 10^{-1}$, whereas the second equation will be around $\text{div}{\mathbf u}\approx \frac{10^{-8}}{10^7} \approx 10^{-15}$. The consequence is this: if the solver wants to make the residual small, it will almost entirely focus on the first set of equations because they are so much bigger, and ignore the divergence equation that describes mass conservation. That's exactly what happens: unless we set the tolerance to extremely small values, the resulting flow field is definitely not divergence free. As an auxiliary problem, it turns out to be difficult to find a tolerance that always works; in practice, one often ends up with a tolerance that requires 30 or 40 iterations for most time steps, and 10,000 for some others.
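
    To make the imbalance concrete, here is a small, self-contained C++ sketch (not part of the tutorial; it merely reproduces the order-of-magnitude arithmetic above and anticipates the scaling factor $\frac{\eta}{L}$ introduced below):

  #include <iostream>

  int main()
  {
    const double eta = 1e21; // viscosity [Pa s]
    const double u   = 1e-8; // typical velocity [m/s]
    const double d   = 1e7;  // diameter of the domain [m]
    const double L   = 1e4;  // plume length scale [m], introduced below

    // First equation:  eta div eps(u) ~ eta u / d^2 ~ 1e-1
    std::cout << "momentum equation   ~ " << eta * u / (d * d) << '\n';
    // Second equation: div u ~ u / d ~ 1e-15
    std::cout << "divergence equation ~ " << u / d << '\n';
    // Multiplying the second equation by eta/L ~ 1e17 brings the two
    // residual contributions to within a few orders of magnitude of
    // each other, rather than 14 apart:
    std::cout << "scaled divergence   ~ " << (eta / L) * u / d << '\n';
  }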


    So what's a numerical analyst to do in a case like this? The answer is to start at the root and make sure that everything is mathematically consistent first. In our case, this means that if we want to solve the system of Stokes equations jointly, we have to scale them so that they all have the same physical dimensions. Here, this means multiplying the second equation by a quantity with units $\frac{\text{Pa}\;\text{s}}{\text{m}}$; one choice is to multiply it by $\frac{\eta}{L}$ where $L$ is a typical length scale in our domain (which experiments show is best chosen to be the diameter of plumes — around 10 km — rather than the diameter of the domain). Using these numbers for $\eta$ and $L$, this factor is around $10^{17}$. So, we now get this for the Stokes system:

\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) + \nabla p &=&
   \rho(T) \; \mathbf{g},
   \\
   \frac{\eta}{L} \nabla \cdot {\mathbf u} &=& 0.
\end{eqnarray*}

    The trouble with this is that the result is no longer symmetric (we have $\frac{\eta}{L} \nabla \cdot$ at the bottom left, but not its transpose operator at the top right). This, however, can be cured by introducing a scaled pressure $\hat p = \frac{L}{\eta}p$, and we get the scaled equations

\begin{eqnarray*}
   -\nabla \cdot (2 \eta \varepsilon ({\mathbf u})) +
   \nabla \left(\frac{\eta}{L} \hat p\right) &=&
   \rho(T) \; \mathbf{g},
   \\
   \frac{\eta}{L} \nabla \cdot {\mathbf u} &=& 0.
\end{eqnarray*}

    This is now symmetric. Obviously, we can easily recover the original pressure $p$ from the scaled pressure $\hat p$ that we compute as a result of this procedure.


    In the program below, we will introduce a factor EquationData::pressure_scaling that corresponds to $\frac{\eta}{L}$, and we will use this factor in the assembly of the system matrix and preconditioner. Because working with the scaled pressure is annoying and error prone, we recover the unscaled, physical pressure immediately following the solution of the linear system, i.e., the solution vector's pressure component is immediately unscaled after the solve. Since the solver uses the fact that we can use a good initial guess by extrapolating the previous solutions, we also have to scale the pressure immediately before solving.
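
    In code, that bookkeeping might look like the following minimal sketch (assumed names: a block vector whose block(1) holds the pressure, and a solve() wrapper; this is not the tutorial's literal code):

  // Scale the extrapolated initial guess: \hat p = (L/eta) p.
  solution.block(1) /= EquationData::pressure_scaling;
  solve();
  // Immediately recover the physical pressure: p = (eta/L) \hat p.
  solution.block(1) *= EquationData::pressure_scaling;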

    Changes to the Stokes preconditioner and solver

    In this tutorial program, we apply a variant of the preconditioner used in step-31. That preconditioner was built to operate on the system matrix $M$ in block form such that the product matrix

\begin{eqnarray*}
   P^{-1} M
   =
   \left(\begin{array}{cc}
     A^{-1} & 0 \\ S^{-1} B A^{-1} & -S^{-1}
   \end{array}\right)
   \left(\begin{array}{cc}
     A & B^T \\ B & 0
   \end{array}\right)
\end{eqnarray*}


    is of a form that Krylov-based iterative solvers like GMRES can solve in a few iterations. We then replaced the exact inverse of $A$ by the action of an AMG preconditioner $\tilde{A}$ based on a vector Laplace matrix, approximated the Schur complement $S = B A^{-1} B^T$ by a mass matrix $M_p$ on the pressure space and wrote an InverseMatrix class for implementing the action of $M_p^{-1}\approx S^{-1}$ on vectors. In the InverseMatrix class, we used a CG solve with an incomplete Cholesky (IC) preconditioner for performing the inner solves.


    An observation one can make is that we use just the action of a preconditioner for approximating the velocity inverse $A^{-1}$ (and the outer GMRES iteration takes care of the approximate character of the inverse), whereas we use a more or less exact inverse for $M_p^{-1}$, realized by a fully converged CG solve. This appears unbalanced, but there's system to this madness: almost all the effort goes into the upper left block to which we apply the AMG preconditioner, whereas even an exact inversion of the pressure mass matrix costs basically nothing. Consequently, if it helps us reduce the overall number of iterations somewhat, then this effort is well spent.

    That said, even though the solver worked well for step-31, we have a problem here that is a bit more complicated (cells are deformed, the pressure varies by orders of magnitude, and we want to plan ahead for more complicated physics), and so we'll change a few things slightly:

    • For more complex problems, it turns out that using just a single AMG V-cycle as a preconditioner is not always sufficient. The outer solver converges just fine most of the time in a reasonable number of iterations (say, less than 50), but there is the occasional time step where it suddenly takes 700 or so. What exactly is going on there is hard to determine, but the problem can be avoided by using a more accurate solver for the top left block. Consequently, we'll want to use a CG iteration to invert the top left block of the preconditioner matrix, and use the AMG as a preconditioner for the CG solver.
    • The downside of this is that, of course, the Stokes preconditioner becomes much more expensive (approximately 10 times more expensive than when we just use a single V-cycle). Our strategy then is this: let's do up to 30 GMRES iterations with just the V-cycle as a preconditioner and, if that doesn't yield convergence, take the best approximation of the Stokes solution obtained after this first round of iterations and use it as the starting guess for iterations where we use the full inner solver with a rather lenient tolerance as preconditioner. In all our experiments this leads to convergence in only a few additional iterations (a sketch of this two-stage strategy follows this list).
    • One thing we need to pay attention to is that when using a CG with a lenient tolerance in the preconditioner, then $y = \tilde A^{-1} r$ is no longer a linear function of $r$ (it is, of course, if we have a very stringent tolerance in our solver, or if we only apply a single V-cycle). This is a problem since now our preconditioner is no longer a linear operator; in other words, every time GMRES uses it the preconditioner looks different. The standard GMRES solver can't deal with this, leading to slow convergence or even breakdown, but the F-GMRES variant is designed to deal with exactly this kind of situation, and we consequently use it.
    • On the other hand, once we have settled on using F-GMRES, we can relax the tolerance used in inverting the preconditioner for $S$. In step-31, we ran a preconditioned CG method on $\tilde S$ until the residual had been reduced by 7 orders of magnitude. Here, we can again be more lenient because we know that the outer preconditioner doesn't suffer.
    • In step-31, we used a left preconditioner in which we first invert the top left block of the preconditioner matrix, then apply the bottom left (divergence) one, and then invert the bottom right. In other words, the application of the preconditioner acts as a lower left block triangular matrix. Another option is to use a right preconditioner that here would act as an upper right block triangular matrix, i.e., we first invert the bottom right Schur complement, apply the top right (gradient) operator and then invert the elliptic top left block. To a degree, which one to choose is a matter of taste. That said, there is one significant advantage to a right preconditioner in GMRES-type solvers: the residual with which we determine whether we should stop the iteration is the true residual, not the norm of the preconditioned equations. Consequently, it is much simpler to compare it to the stopping criterion we typically use, namely the norm of the right hand side vector. In writing this code we found that the scaling issues we discussed above also made it difficult to determine suitable stopping criteria for left-preconditioned linear systems, and consequently this program uses a right preconditioner.
    • In step-31, we used an IC (incomplete Cholesky) preconditioner for the pressure mass matrix in the Schur complement preconditioner and for the solution of the temperature system. Here, we could in principle do the same, but we choose an even simpler preconditioner, namely a Jacobi preconditioner for both systems. This is because here we are targeting massively parallel computations, where the decompositions for IC/ILU would have to be performed block-wise for the locally owned degrees of freedom on each processor. This means that the preconditioner becomes more like a Jacobi preconditioner anyway, so we rather start from that variant straight away. Note that we only use the Jacobi preconditioners for CG solvers with mass matrices, where they give optimal (h-independent) convergence anyway, even though they usually require about twice as many iterations as an IC preconditioner.
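
    As announced in the second bullet point, the two-stage strategy can be organized as in the following sketch (written against an assumed interface — a callable that runs F-GMRES with a given preconditioner and iteration limit and reports convergence; this is not the tutorial's actual code, and the stage-2 iteration limit is an arbitrary large number):

  #include <functional>

  template <typename VectorType>
  void solve_stokes_two_stage(
    VectorType &solution,
    const std::function<bool(VectorType &, unsigned int, bool)> &fgmres)
  {
    // Stage 1: cheap preconditioner (a single AMG V-cycle), at most
    // 30 iterations; returns true if converged.
    if (fgmres(solution, 30, /*cheap_preconditioner=*/true))
      return;

    // Stage 2: 'solution' now holds the best iterate from stage 1 and
    // serves as the starting guess for the expensive preconditioner,
    // in which the top left block is inverted by an inner CG solve.
    fgmres(solution, 10000, /*cheap_preconditioner=*/false);
  }
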
    As a final note, let us remark that in step-31 we computed the Schur complement $S=B A^{-1} B^T$ by approximating $-\text{div}(-\eta\Delta)^{-1}\nabla \approx \frac 1{\eta} \mathbf{1}$. Now, however, we have re-scaled the $B$ and $B^T$ operators. So $S$ should now approximate $-\frac{\eta}{L}\text{div}(-\eta\Delta)^{-1}\nabla \frac{\eta}{L} \approx \left(\frac{\eta}{L}\right)^2 \frac 1{\eta} \mathbf{1}$. We use the discrete form of the right hand side of this as our approximation $\tilde S$ to $S$.
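
    Concretely, this amounts to assembling the pressure-pressure block of the preconditioner matrix as a pressure mass matrix weighted by $\left(\frac{\eta}{L}\right)^2 \frac 1{\eta}$. A sketch of such an assembly loop (names such as phi_p and EquationData::eta are assumptions for illustration, not the tutorial's literal code):

  for (unsigned int q = 0; q < n_q_points; ++q)
    for (unsigned int i = 0; i < dofs_per_cell; ++i)
      for (unsigned int j = 0; j < dofs_per_cell; ++j)
        // phi_p[i] holds the pressure shape value of dof i at the
        // current quadrature point q.
        local_preconditioner_matrix(i, j) +=
          (EquationData::pressure_scaling *
           EquationData::pressure_scaling / EquationData::eta) *
          phi_p[i] * phi_p[j] * fe_values.JxW(q);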

    Changes to the artificial viscosity stabilization

    Similarly to step-31, we will use an artificial viscosity for stabilization based on a residual of the equation. As a difference to step-31, we will provide two slightly different definitions of the stabilization parameter. For $\alpha=1$, we use the same definition as in step-31:

\begin{eqnarray*}
   \nu_\alpha(T)|_K
   =
   \nu_1(T)|_K
   \dealcoloneq
   \beta
   \|\mathbf{u}\|_{L^\infty(K)}
   h_K
   \min\left\{
     1,
     \frac{\|R_1(T)\|_{L^\infty(K)}}{c(\mathbf{u},T)}
   \right\}
\end{eqnarray*}

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_33.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    Introduction

    Euler flow

    The equations that describe the movement of a compressible, inviscid gas (the so-called Euler equations of gas dynamics) are a basic system of conservation laws. In spatial dimension $d$ they read

\[
 \partial_t \mathbf{w} + \nabla \cdot \mathbf{F}(\mathbf{w}) =
 \mathbf{G}(\mathbf w),
\]

    with the solution $\mathbf{w}=(\rho v_1,\ldots,\rho v_d,\rho, E)^{\top}$ consisting of $\rho$ the fluid density, ${\mathbf v}=(v_1,\ldots v_d)^T$ the flow velocity (and thus $\rho\mathbf v$ being the linear momentum density), and $E$ the energy density of the gas. We interpret the equations above as $\partial_t \mathbf{w}_i + \nabla \cdot \mathbf{F}_i(\mathbf{w}) = \mathbf G_i(\mathbf w)$, $i=1,\ldots,dim+2$.

    For the Euler equations, the flux matrix $\mathbf F$ (or system of flux functions) is defined as (shown here for the case $d=3$)

\begin{eqnarray*}
   \mathbf F(\mathbf w)
   =
   \left(
   \begin{array}{ccc}
     \rho v_1 v_1 + p & \rho v_1 v_2 & \rho v_1 v_3 \\
     \rho v_2 v_1 & \rho v_2 v_2 + p & \rho v_2 v_3 \\
     \rho v_3 v_1 & \rho v_3 v_2 & \rho v_3 v_3 + p \\
     \rho v_1 & \rho v_2 & \rho v_3 \\
     (E+p) v_1 & (E+p) v_2 & (E+p) v_3
   \end{array}
   \right),
\end{eqnarray*}

    and we will choose as particular right hand side forcing only the effects of gravity, described by

\begin{eqnarray*}
   \mathbf G(\mathbf w)
   =
   \left(
   \begin{array}{c}
     g_1 \rho \\
     g_2 \rho \\
     g_3 \rho \\
     0 \\
     \rho \mathbf g \cdot \mathbf v
   \end{array}
   \right),
\end{eqnarray*}

    where $\mathbf g=(g_1,g_2,g_3)^T$ denotes the gravity vector. With this, the entire system of equations reads:

\begin{eqnarray*}
   \partial_t (\rho v_i) + \sum_{s=1}^d \frac{\partial(\rho v_i v_s +
   \delta_{is} p)}{\partial x_s} &=& g_i \rho, \qquad i=1,\dots,d, \\
   \partial_t \rho + \sum_{s=1}^d \frac{\partial(\rho v_s)}{\partial x_s} &=& 0,  \\
   \partial_t E + \sum_{s=1}^d \frac{\partial((E+p)v_s)}{\partial x_s} &=&
   \rho \mathbf g \cdot \mathbf v.
\end{eqnarray*}

    These equations describe, respectively, the conservation of momentum, mass, and energy. The system is closed by a relation that defines the pressure: $p = (\gamma -1)(E-\frac{1}{2} \rho |\mathbf v|^2)$. For the constituents of air (mainly nitrogen and oxygen) and other diatomic gases, the ratio of specific heats is $\gamma=1.4$.
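
    Since this closure relation is used repeatedly below, here is a small, self-contained sketch of it (assuming the component ordering $\mathbf{w}=(\rho v_1,\ldots,\rho v_d,\rho,E)$ stated above; an illustration, not the tutorial's EulerEquations code):

  #include <array>

  template <int dim>
  double compute_pressure(const std::array<double, dim + 2> &w)
  {
    const double gas_gamma = 1.4;        // ratio of specific heats
    const double density   = w[dim];     // rho
    const double energy    = w[dim + 1]; // E

    double momentum_square = 0;          // |rho v|^2
    for (int d = 0; d < dim; ++d)
      momentum_square += w[d] * w[d];

    // p = (gamma - 1) (E - 1/2 rho |v|^2), where
    // 1/2 rho |v|^2 = |rho v|^2 / (2 rho):
    return (gas_gamma - 1.0) *
           (energy - momentum_square / (2.0 * density));
  }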

    This problem obviously falls into the class of vector-valued problems. A general overview of how to deal with these problems in deal.II can be found in the Handling vector valued problems topic.

    Discretization

    Discretization happens in the usual way, taking into account that this is a hyperbolic problem in the same style as the simple one discussed in step-12: We choose a finite element space $V_h$, and integrate our conservation law against our (vector-valued) test function $\mathbf{z} \in V_h$. We then integrate by parts and approximate the boundary flux with a numerical flux $\mathbf{H}$,

\begin{eqnarray*}
 &&\int_{\Omega} (\partial_t \mathbf{w}, \mathbf{z}) + (\nabla \cdot \mathbf{F}(\mathbf{w}), \mathbf{z}) \\
 &\approx &\int_{\Omega} (\partial_t \mathbf{w}, \mathbf{z}) - (\mathbf{F}(\mathbf{w}), \nabla \mathbf{z}) + h^{\eta}(\nabla \mathbf{w} , \nabla \mathbf{z}) + \int_{\partial \Omega} (\mathbf{H}(\mathbf{w}^+, \mathbf{w}^-, \mathbf{n}), \mathbf{z}^+),
\end{eqnarray*}


      where a superscript $+$ denotes the interior trace of a function, and $-$ represents the outer trace. The diffusion term $h^{\eta}(\nabla \mathbf{w} , \nabla \mathbf{z})$ is introduced strictly for stability, where $h$ is the mesh size and $\eta$ is a parameter prescribing how much diffusion to add.


      On the boundary, we have to say what the outer trace $\mathbf{w}^-$ is. Depending on the boundary condition, we prescribe either of the following:

      • Inflow boundary: $\mathbf{w}^-$ is prescribed to be the desired value.
      • Supersonic outflow boundary: $\mathbf{w}^- = \mathbf{w}^+$
      • Subsonic outflow boundary: $\mathbf{w}^- = \mathbf{w}^+$ except that the energy variable is modified to support a prescribed pressure $p_o$, i.e. $\mathbf{w}^- =(\rho^+, \rho v_1^+, \dots, \rho v_d^+, p_o/(\gamma -1) + 0.5 \rho |\mathbf{v}^+|^2)$
      • Reflective boundary: we set $\mathbf{w}^-$ so that $(\mathbf{v}^+ + \mathbf{v}^-) \cdot \mathbf{n} = 0$ and $\rho^- = \rho^+,E^-=E^+$ (see the sketch after this list).
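
      For the reflective case, the exterior velocity can be computed by subtracting twice the normal component; a minimal self-contained sketch (an illustration, not the tutorial's code):

  #include <array>

  template <int dim>
  std::array<double, dim>
  reflected_velocity(const std::array<double, dim> &v_plus, // v^+
                     const std::array<double, dim> &normal) // unit normal n
  {
    double vn = 0; // v^+ . n
    for (int d = 0; d < dim; ++d)
      vn += v_plus[d] * normal[d];

    // v^- = v^+ - 2 (v^+ . n) n, so that (v^+ + v^-) . n = 0.
    std::array<double, dim> v_minus;
    for (int d = 0; d < dim; ++d)
      v_minus[d] = v_plus[d] - 2 * vn * normal[d];
    return v_minus;
  }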

      More information on these issues can be found, for example, in Ralf Hartmann's PhD thesis ("Adaptive Finite Element Methods for the Compressible Euler Equations", PhD thesis, University of Heidelberg, 2002).


      We use a time stepping scheme to substitute the time derivative in the above equations. For simplicity, we define $ \mathbf{B}({\mathbf{w}_{n}})(\mathbf z) $ as the spatial residual at time step $n$ :

\begin{eqnarray*}
  \mathbf{B}(\mathbf{w}_{n})(\mathbf z)  &=&
 - \int_{\Omega} \left(\mathbf{F}(\mathbf{w}_n),
 \nabla\mathbf{z}\right) +  h^{\eta}(\nabla \mathbf{w}_n , \nabla \mathbf{z}) \\
 &&+ \int_{\partial \Omega} \left(\mathbf{H}(\mathbf{w}_n^+,
 \mathbf{w}_n^-, \mathbf{n}), \mathbf{z}\right) \\
 &&
 -
 \int_{\Omega} \left(\mathbf{G}(\mathbf{w}_n),
 \mathbf{z}\right) .
\end{eqnarray*}

      At each time step, our full discretization is thus that the residual applied to any test function $\mathbf z$ equals zero:

\begin{eqnarray*}
 R(\mathbf{W}_{n+1})(\mathbf z) &=&
 \int_{\Omega} \left(\frac{{\mathbf w}_{n+1} - \mathbf{w}_n}{\delta t},
 \mathbf{z}\right)+
 \theta \mathbf{B}({\mathbf{w}}_{n+1}) +  (1-\theta) \mathbf{B}({\mathbf w}_{n}) \\
 &=& 0
\end{eqnarray*}

      where $ \theta \in [0,1] $ and $\mathbf{w}_i = \sum_k \mathbf{W}_i^k \mathbf{\phi}_k$. Choosing $\theta=0$ results in the explicit (forward) Euler scheme, $\theta=1$ in the stable implicit (backward) Euler scheme, and $\theta=\frac 12$ in the Crank-Nicolson scheme.

      In the implementation below, we choose the Lax-Friedrichs flux for the function $\mathbf H$, i.e. $\mathbf{H}(\mathbf{a},\mathbf{b},\mathbf{n}) = \frac{1}{2}(\mathbf{F}(\mathbf{a})\cdot \mathbf{n} + \mathbf{F}(\mathbf{b})\cdot \mathbf{n} + \alpha (\mathbf{a} - \mathbf{b}))$, where $\alpha$ is either a fixed number specified in the input file, or where $\alpha$ is a mesh dependent value. In the latter case, it is chosen as $\frac{h}{2\delta T}$ with $h$ the diameter of the face to which the flux is applied, and $\delta T$ the current time step.
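
      The Lax-Friedrichs flux itself is simple to write down in code; here is a self-contained sketch (the evaluations of $\mathbf F(\mathbf w)\cdot\mathbf n$ are passed in precomputed since they depend on the equation system; this is an illustration, not the tutorial's code):

  #include <array>
  #include <cstddef>

  template <std::size_t n_components>
  std::array<double, n_components> lax_friedrichs_flux(
    const std::array<double, n_components> &a,            // state w^+
    const std::array<double, n_components> &b,            // state w^-
    const std::array<double, n_components> &flux_a_dot_n, // F(a).n
    const std::array<double, n_components> &flux_b_dot_n, // F(b).n
    const double alpha)                                   // stabilization
  {
    // H(a,b,n) = 1/2 (F(a).n + F(b).n + alpha (a - b))
    std::array<double, n_components> H;
    for (std::size_t c = 0; c < n_components; ++c)
      H[c] = 0.5 * (flux_a_dot_n[c] + flux_b_dot_n[c] +
                    alpha * (a[c] - b[c]));
    return H;
  }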

      With these choices, equating the residual to zero results in a nonlinear system of equations $R(\mathbf{W}_{n+1})=0$. We solve this nonlinear system by a Newton iteration (in the same way as explained in step-15), i.e. by iterating

\begin{eqnarray*}
 R'(\mathbf{W}^k_{n+1},\delta \mathbf{W}_{n+1}^k)(\mathbf z) & = & -
 R(\mathbf{W}^{k}_{n+1})(\mathbf z) \qquad \qquad \forall \mathbf z\in V_h \\
 \mathbf{W}^{k+1}_{n+1} &=& \mathbf{W}^k_{n+1} + \delta \mathbf{W}^k_{n+1},
\end{eqnarray*}

      until $|R(\mathbf{W}^k_{n+1})|$ (the residual) is sufficiently small. By testing with the nodal basis of a finite element space instead of all $\mathbf z$, we arrive at a linear system for $\delta \mathbf W$:

\begin{eqnarray*}
 \mathbf R'(\mathbf{W}^k_{n+1})\delta \mathbf{W}^k_{n+1} & = & -
 \mathbf R(\mathbf{W}^{k}_{n+1}).
\end{eqnarray*}

      This linear system is, in general, neither symmetric nor does it have any particular definiteness properties. We will either use a direct solver or Trilinos' GMRES implementation to solve it. As will become apparent from the results shown below, this fully implicit iteration converges very rapidly (typically in 3 steps) and with the quadratic convergence order expected from a Newton method.
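
      Schematically, the resulting outer iteration could be organized like this (a sketch under assumed interfaces, not the tutorial's code; solve_linearized is expected to assemble $\mathbf R'(\mathbf W^k)$ and return the update $\delta \mathbf W^k$ solving the linear system above):

  #include <functional>

  template <typename VectorType>
  void newton_solve(
    VectorType &W, // holds W^0_{n+1} on entry, W_{n+1} on exit
    const std::function<double(const VectorType &)> &residual_norm,
    const std::function<VectorType(const VectorType &)> &solve_linearized,
    const double tolerance)
  {
    // Iterate W^{k+1} = W^k + dW^k, with R'(W^k) dW^k = -R(W^k),
    // until the residual is sufficiently small.
    while (residual_norm(W) > tolerance)
      W += solve_linearized(W);
  }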

      Automatic differentiation


      Since computing the Jacobian matrix $\mathbf R'(\mathbf W^k)$ is a terrible beast, we use an automatic differentiation package, Sacado, to do this. Sacado is a package within the Trilinos framework and offers a C++ template class Sacado::Fad::DFad (Fad standing for "forward automatic differentiation") that supports basic arithmetic operators and functions such as sqrt, sin, cos, pow, etc. In order to use this feature, one declares a collection of variables of this type and then denotes some of this collection as degrees of freedom, the rest of the variables being functions of the independent variables. These variables are used in an algorithm, and as the variables are used, their sensitivities with respect to the degrees of freedom are continuously updated.

      One can imagine that for the full Jacobian matrix as a whole, this could be prohibitively expensive: the number of independent variables are the $\mathbf W^k$, the dependent variables the elements of the vector $\mathbf R(\mathbf W^k)$. Both of these vectors can easily have tens of thousands of elements or more. However, it is important to note that not all elements of $\mathbf R$ depend on all elements of $\mathbf W^k$: in fact, an entry in $\mathbf R$ only depends on an element of $\mathbf W^k$ if the two corresponding shape functions overlap and couple in the weak form.


      Specifically, it is wise to define a minimum set of independent AD variables that the residual on the current cell may possibly depend on: on every element, we define those variables as independent that correspond to the degrees of freedom defined on this cell (or, if we have to compute jump terms between cells, that correspond to degrees of freedom defined on either of the two adjacent cells), and the dependent variables are the elements of the local residual vector. Not doing this, i.e. defining all elements of $\mathbf W^k$ as independent, will result in a very expensive computation of a lot of zeros: the elements of the local residual vector are independent of almost all elements of the solution vector, and consequently their derivatives are zero; however, trying to compute these zeros can easily take 90% or more of the compute time of the entire program, as shown in an experiment inadvertently made by a student a few years after this program was first written.


      Coming back to the question of computing the Jacobian automatically: The author has used this approach side by side with a hand coded Jacobian for the incompressible Navier-Stokes problem and found the Sacado approach to be just as fast as using a hand coded Jacobian, but infinitely simpler and less error prone: Since using auto-differentiation requires only that one code the residual $R(\mathbf{W})$, ensuring code correctness and maintaining the code becomes tremendously simpler – the Jacobian matrix $\mathbf R'$ is computed by essentially the same code that also computes the residual $\mathbf R$.

      All this said, here's a very simple example showing how Sacado can be used:

  #include <Sacado.hpp>
  #include <iostream>

  using fad_double = Sacado::Fad::DFad<double>;

  int main()
  {
    fad_double a, b, c;

    a = 1;
    b = 2;

    a.diff(0, 2); // Set a to be dof 0, in a 2-dof system.
    b.diff(1, 2); // Set b to be dof 1, in a 2-dof system.

    c = 2 * a + std::cos(a * b);

    double *derivs = &c.fastAccessDx(0); // Access the derivatives of c.

    std::cout << "dc/da = " << derivs[0] << ", dc/db=" << derivs[1] << std::endl;
  }

    The output consists of the derivatives $\frac{\partial c(a,b)}{\partial a}$ and $\frac{\partial c(a,b)}{\partial b}$ of $c(a,b)=2a+\cos(ab)$ at $a=1,b=2$.

    It should be noted that Sacado provides more auto-differentiation capabilities than the small subset used in this program. However, understanding the example above is enough to understand the use of Sacado in this Euler flow program.

    Trilinos solvers

    The program uses either the Aztec iterative solvers, or the Amesos sparse direct solver, both provided by the Trilinos package. This package is inherently designed to be used in a parallel program; however, it may just as easily be used in serial, as is done here. The Epetra package is the basic vector/matrix library upon which the solvers are built. This very powerful package can be used to describe the parallel distribution of a vector, and to define sparse matrices that operate on these vectors. Please view the commented code for more details on how these solvers are used within the example.


    Implementation

    The implementation of this program is split into three essential parts:

    • The EulerEquations class that encapsulates everything that completely describes the specifics of the Euler equations. This includes the flux matrix $\mathbf F(\mathbf W)$, the numerical flux $\mathbf F(\mathbf W^+,\mathbf W^-,\mathbf n)$, the right hand side $\mathbf G(\mathbf W)$, boundary conditions, refinement indicators, postprocessing the output, and similar things that require knowledge of the meaning of the individual components of the solution vectors and the equations.

      Transformations between variables

      Next, we define the gas constant. We will set it to 1.4 in its definition immediately following the declaration of this class (unlike integer variables, like the ones above, static const floating point member variables cannot be initialized within the class declaration in C++). This value of 1.4 is representative of a gas that consists of molecules composed of two atoms, such as air, which, up to small traces, consists almost entirely of $N_2$ and $O_2$.

        static const double gas_gamma;
       
       
      In the following, we will need to compute the kinetic energy and the pressure from a vector of conserved variables. This we can do based on the energy density and the kinetic energy $\frac 12 \rho |\mathbf v|^2 = \frac{|\rho \mathbf v|^2}{2\rho}$ (note that the independent variables contain the momentum components $\rho v_i$, not the velocities $v_i$).

        template <typename InputVector>
        static typename InputVector::value_type
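
      The declaration above is cut off in this excerpt; based on the formula just given, a plausible completion looks like the following sketch (the member names first_momentum_component and density_component are assumptions here, not verified against the tutorial source):

  template <typename InputVector>
  static typename InputVector::value_type
  compute_kinetic_energy(const InputVector &W)
  {
    // 1/2 rho |v|^2 = |rho v|^2 / (2 rho), computed from the momentum
    // components rho v_i and the density rho stored in W.
    typename InputVector::value_type kinetic_energy = 0;
    for (unsigned int d = 0; d < dim; ++d)
      kinetic_energy +=
        W[first_momentum_component + d] * W[first_momentum_component + d];
    kinetic_energy *= 1. / (2 * W[density_component]);

    return kinetic_energy;
  }
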
      /usr/share/doc/packages/dealii/doxygen/deal.II/step_34.html differs (HTML document, UTF-8 Unicode text, with very long lines)

      Irrotational flow

      The incompressible motion of an inviscid fluid past a body (for example air past an airplane wing, or air or water past a propeller) is usually modeled by the Euler equations of fluid dynamics:

\begin{align*}
   \frac{\partial }{\partial t}\mathbf{v} + (\mathbf{v}\cdot\nabla)\mathbf{v}
   &=
   -\frac{1}{\rho}\nabla p + \mathbf{g}
   &\text{in } \mathbb{R}^n\backslash\Omega
   \\
   \nabla \cdot \mathbf{v}&=0
   &\text{in } \mathbb{R}^n\backslash\Omega
\end{align*}

      where the fluid density $\rho$ and the acceleration $\mathbf{g}$ due to external forces are given and the velocity $\mathbf{v}$ and the pressure $p$ are the unknowns. Here $\Omega$ is a closed bounded region representing the body around which the fluid moves.

      The above equations can be derived from the Navier-Stokes equations assuming that the effects due to viscosity are negligible compared to those due to the pressure gradient, inertial forces and the external forces. This is the opposite case of the Stokes equations discussed in step-22, which are the limit case of dominant viscosity, i.e. where the velocity is so small that inertial forces can be neglected. On the other hand, owing to the assumed incompressibility, the equations are not suited for very high speed gas flows where compressibility and the equation of state of the gas have to be taken into account, leading to the Euler equations of gas dynamics, a hyperbolic system.

      For the purpose of this tutorial program, we will consider only stationary flow without external forces:

\begin{align*}
   (\mathbf{v}\cdot\nabla)\mathbf{v}
   &=
   -\frac{1}{\rho}\nabla p
   &\text{in } \mathbb{R}^n\backslash\Omega
   \\
   \nabla \cdot \mathbf{v}&=0
   &\text{in } \mathbb{R}^n\backslash\Omega
\end{align*}

      Uniqueness of the solution of the Euler equations is ensured by adding the boundary conditions

\[
   \label{eq:boundary-conditions}
   \begin{aligned}
     \mathbf{n}\cdot\mathbf{v}& = 0 \qquad && \text{ on } \partial\Omega \\
     \mathbf{v}& = \mathbf{v}_\infty && \text{ when } |\mathbf{x}| \to \infty,
   \end{aligned}
\]

      which is to say that the body is at rest in our coordinate systems and is not permeable, and that the fluid has (constant) velocity $\mathbf{v}_\infty$ at infinity. An alternative viewpoint is that our coordinate system moves along with the body whereas the background fluid is at rest at infinity. Notice that we define the normal $\mathbf{n}$ as the outer normal to the domain $\Omega$, which is the opposite of the outer normal to the integration domain.

      For both stationary and non-stationary flow, the solution process starts by solving for the velocity in the second equation and substituting into the first equation in order to find the pressure. The solution of the stationary Euler equations is typically performed in order to understand the behavior of the given (possibly complex) geometry when a prescribed motion is enforced on the system.

      The first step in this process is to change the frame of reference from a coordinate system moving along with the body to one in which the body moves through a fluid that is at rest at infinity. This can be expressed by introducing a new velocity $\mathbf{\tilde{v}}=\mathbf{v}-\mathbf{v}_\infty$ for which we find that the same equations hold (because $\nabla\cdot \mathbf{v}_\infty=0$) and we have boundary conditions

\[
   \label{eq:boundary-conditions-tilde}
   \begin{aligned}
     \mathbf{n}\cdot\mathbf{\tilde{v}}& = -\mathbf{n}\cdot\mathbf{v}_\infty \qquad && \text{ on } \partial\Omega \\
     \mathbf{\tilde{v}}& = 0 && \text{ when } |\mathbf{x}| \to \infty,
   \end{aligned}
\]

      If we assume that the fluid is irrotational, i.e., $\nabla \times \mathbf{v}=0$ in $\mathbb{R}^n\backslash\Omega$, we can represent the velocity, and consequently also the perturbation velocity, as the gradient of a scalar function:

\[
   \mathbf{\tilde{v}}=\nabla\phi,
\]

      and so the second part of the Euler equations above can be rewritten as the homogeneous Laplace equation for the unknown $\phi$:

\begin{align*}
 \label{laplace}
 \Delta\phi &= 0 \qquad &&\text{in}\ \mathbb{R}^n\backslash\Omega,
            \\
            \mathbf{n}\cdot\nabla\phi &= -\mathbf{n}\cdot\mathbf{v}_\infty
            && \text{on}\ \partial\Omega
\end{align*}

      while the momentum equation reduces to Bernoulli's equation that expresses the pressure $p$ as a function of the potential $\phi$:

\[
 \frac{p}{\rho} +\frac{1}{2} | \nabla \phi |^2 = 0 \in \Omega.
\]

      So we can solve the problem by solving the Laplace equation for the potential. We recall that the following functions, called fundamental solutions of the Laplace equation,

\[ \begin{aligned}
 \label{eq:3} G(\mathbf{y}-\mathbf{x}) = &
 -\frac{1}{2\pi}\ln|\mathbf{y}-\mathbf{x}| \qquad && \text{for } n=2 \\
 G(\mathbf{y}-\mathbf{x}) = &
 \frac{1}{4\pi}\frac{1}{|\mathbf{y}-\mathbf{x}|}&& \text{for } n=3,
 \end{aligned}
\]

      satisfy in a distributional sense the equation:

\[
 -\Delta_y G(\mathbf{y}-\mathbf{x}) = \delta(\mathbf{y}-\mathbf{x}),
\]

      where the derivative is done in the variable $\mathbf{y}$. By using the usual Green identities, our problem can be written on the boundary $\partial\Omega = \Gamma$ only.
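
      As an aside, the two fundamental solutions above are straightforward to evaluate; a minimal self-contained sketch taking the distance $r=|\mathbf{y}-\mathbf{x}|$ as input (an illustration, not the tutorial's code):

  #include <cmath>

  double fundamental_solution(const double r, const int dim)
  {
    const double pi = 3.14159265358979323846;
    // G = -ln(r) / (2 pi) in 2d,  G = 1 / (4 pi r) in 3d.
    return (dim == 2) ? -std::log(r) / (2 * pi)
                      : 1.0 / (4 * pi * r);
  }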

      We recall the general definition of the second Green identity:

\[\label{green}
   \int_{\omega}
   (-\Delta u)v\,dx + \int_{\partial\omega} \frac{\partial u}{\partial \tilde{\mathbf{n}} }v \,ds
   =
   \int_{\omega}
   (-\Delta v)u\,dx + \int_{\partial\omega} u\frac{\partial v}{\partial \tilde{\mathbf{n}}} \,ds,
\]

      where $\tilde{\mathbf{n}}$ is the normal to the surface of $\omega$ pointing outwards from the domain of integration $\omega$.

      In our case the domain of integration is the domain $\mathbb{R}^n\backslash\Omega$, whose boundary is $ \Gamma_\infty \cup \Gamma$, where the "boundary" at infinity is defined as

\[
 \Gamma_\infty \dealcoloneq \lim_{r\to\infty} \partial B_r(0).
\]

      In our program the normals are defined as outer to the domain $\Omega$, that is, they are in fact inner to the integration domain, and some care is required in defining the various integrals with the correct signs for the normals, i.e. replacing $\tilde{\mathbf{n}}$ by $-\mathbf{n}$.

      If we substitute $u$ and $v$ in the Green identity with the solution $\phi$ and with the fundamental solution of the Laplace equation respectively, as long as $\mathbf{x}$ is chosen in the region $\mathbb{R}^n\backslash\Omega$, we obtain:

\[
   \phi(\mathbf{x}) -
   \int_{\Gamma\cup\Gamma_\infty}\frac{\partial G(\mathbf{y}-\mathbf{x})}{\partial \mathbf{n}_y}\phi(\mathbf{y})\,ds_y
   =
   -\int_{\Gamma\cup\Gamma_\infty}G(\mathbf{y}-\mathbf{x})\frac{\partial \phi}{\partial \mathbf{n}_y}(\mathbf{y})\,ds_y
   \qquad \forall\mathbf{x}\in \mathbb{R}^n\backslash\Omega
\]

      where the normals now point inward, into the domain of integration.

      Notice that in the above equation, we also have the integrals on the portion of the boundary at $\Gamma_\infty$. Using the boundary conditions of our problem, we have that $\nabla \phi$ is zero at infinity (which simplifies the integral on $\Gamma_\infty$ on the right hand side).


      The integral on $\Gamma_\infty$ that appears on the left hand side can be treated by observing that $\nabla\phi=0$ implies that $\phi$ at infinity is necessarily constant. We define its value to be $\phi_\infty$. It is an easy exercise to prove that

\[
 -\int_{\Gamma_\infty} \frac{\partial G(\mathbf{y}-\mathbf{x})}
 {\partial \mathbf{n}_y}\phi_\infty \,ds_y =
 \lim_{r\to\infty} \int_{\partial B_r(0)} \frac{\mathbf{r}}{r} \cdot \nabla G(\mathbf{y}-\mathbf{x})
 \phi_\infty \,ds_y = -\phi_\infty.
\]

      Using this result, we can reduce the above equation to one posed only on the boundary $\Gamma$, using the so-called Single and Double Layer Potential operators:

\[\label{integral}
   \phi(\mathbf{x}) - (D\phi)(\mathbf{x}) = \phi_\infty
   -\left(S \frac{\partial \phi}{\partial n_y}\right)(\mathbf{x})
   \qquad \forall\mathbf{x}\in \mathbb{R}^n\backslash\Omega.
\]

      (The name of these operators comes from the fact that they describe the electric potential in $\mathbb{R}^n$ due to a single thin sheet of charges along a surface, and due to a double sheet of charges and anti-charges along the surface, respectively.)

      In our case, we know the Neumann values of $\phi$ on the boundary: $\mathbf{n}\cdot\nabla\phi = -\mathbf{n}\cdot\mathbf{v}_\infty$. Consequently,

\[
   \phi(\mathbf{x}) - (D\phi)(\mathbf{x}) = \phi_\infty +
    \left(S[\mathbf{n}\cdot\mathbf{v}_\infty]\right)(\mathbf{x})
    \qquad \forall\mathbf{x} \in \mathbb{R}^n\backslash\Omega.
\]

      If we take the limit for $\mathbf{x}$ tending to $\Gamma$ of the above equation, using well known properties of the single and double layer operators, we obtain an equation for $\phi$ just on the boundary $\Gamma$ of $\Omega$:

\[\label{SD}
   \alpha(\mathbf{x})\phi(\mathbf{x}) - (D\phi)(\mathbf{x}) = \phi_\infty +
   \left(S [\mathbf{n}\cdot\mathbf{v}_\infty]\right)(\mathbf{x})
   \quad \mathbf{x}\in \partial\Omega,
\]


      which is the Boundary Integral Equation (BIE) we were looking for, where the quantity $\alpha(\mathbf{x})$ is the fraction of angle or solid angle by which the point $\mathbf{x}$ sees the domain of integration $\mathbb{R}^n\backslash\Omega$.

      /usr/share/doc/packages/dealii/doxygen/deal.II/step_35.html differs (HTML document, UTF-8 Unicode text, with very long lines)

\begin{align*}
  u_t + u \cdot \nabla u - \nu \Delta u + \nabla p = f, \\
  \nabla \cdot u = 0,
\end{align*}

      where $u$ represents the velocity of the flow and $p$ the pressure. This system of equations is supplemented by the initial condition

      \[
   u |_{t=0} = u_0,
 \]

      with $u_0$ sufficiently smooth and solenoidal, and suitable boundary conditions. For instance, an admissible boundary condition, is

      \[
   u|_{\partial\Omega} = u_b.
 \]

\[
  u\times n|_{\Gamma_2} = 0, \quad p|_{\Gamma_2} = 0
\]

      where $n$ is the outer unit normal. The boundary conditions on $\Gamma_2$ are often used to model outflow conditions.

      In previous tutorial programs (see for instance step-20 and step-22) we have seen how to solve the time-independent Stokes equations using a Schur complement approach. For the time-dependent case, after time discretization, we would arrive at a system like

      \begin{align*}
   \frac1\tau u^k - \nu \Delta u^k + \nabla p^k = F^k, \\
   \nabla \cdot u^k = 0,
 \end{align*}

      where $\tau$ is the time-step. Although the structure of this system is similar to the Stokes system and thus it could be solved using a Schur complement approach, it turns out that the condition number of the Schur complement is proportional to $\tau^{-2}$. This makes the system very difficult to solve, and means that for the Navier-Stokes equations, this is not a useful avenue to the solution.

      Projection methods

      Rather, we need to come up with a different approach to solve the time-dependent Navier-Stokes equations. The difficulty in their solution comes from the fact that the velocity and the pressure are coupled through the constraint

\[
  \nabla \cdot u = 0,
\]
 with $v^{k+1}\in H$. Taking the divergence of this equation we arrive at the projection equation.

    • The more accurate of the two variants outlined above is the rotational one. However, the program below implements both variants. Moreover, in the author's experience, the standard form is the one that should be used if, for instance, the viscosity $\nu$ is variable.

    The standard incremental scheme and the rotational incremental scheme were first considered by van Kan in


      NavierStokesProjection::output_results

      This method plots the current solution. The main difficulty is that we want to create a single output file that contains the data for all velocity components, the pressure, and also the vorticity of the flow. On the other hand, velocities and the pressure live on separate DoFHandler objects, and so can't be written to the same file using a single DataOut object. As a consequence, we have to work a bit harder to get the various pieces of data into a single DoFHandler object, and then use that to drive graphical output.

      We will not elaborate on this process here, but rather refer to step-32, where a similar procedure is used (and is documented) to create a joint DoFHandler object for all variables.

      Let us also note that we here compute the vorticity as a scalar quantity in a separate function, using the $L^2$ projection of the quantity $\text{curl} u$ onto the finite element space used for the components of the velocity. In principle, however, we could also have computed it as a pointwise quantity from the velocity, and do so through the DataPostprocessor mechanism discussed in step-29 and step-33.
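      As a rough illustration of such an $L^2$ projection (a sketch only, not the program's actual implementation; the names vel_dof_handler, vel_fe, vel_mass_matrix and the storage of the two velocity components in separate scalar vectors are assumptions, and the curl is written for the 2d case), one could assemble the right hand side $\int_\Omega \phi_i\,(\partial_x u_y - \partial_y u_x)\,dx$ and solve a mass-matrix system:

        template <int dim>
        void project_vorticity(const DoFHandler<dim>      &vel_dof_handler,
                               const FiniteElement<dim>   &vel_fe,
                               const Vector<double>       &u_x,
                               const Vector<double>       &u_y,
                               const SparseMatrix<double> &vel_mass_matrix,
                               Vector<double>             &vorticity)
        {
          QGauss<dim>   quadrature(vel_fe.degree + 1);
          FEValues<dim> fe_values(vel_fe, quadrature,
                                  update_values | update_gradients |
                                  update_JxW_values);
          Vector<double> rhs(vel_dof_handler.n_dofs());
          Vector<double> local_rhs(vel_fe.dofs_per_cell);
          std::vector<types::global_dof_index> dof_indices(vel_fe.dofs_per_cell);
          std::vector<Tensor<1, dim>> grad_ux(quadrature.size()),
                                      grad_uy(quadrature.size());

          for (const auto &cell : vel_dof_handler.active_cell_iterators())
            {
              fe_values.reinit(cell);
              fe_values.get_function_gradients(u_x, grad_ux);
              fe_values.get_function_gradients(u_y, grad_uy);
              local_rhs = 0;
              for (unsigned int q = 0; q < quadrature.size(); ++q)
                for (unsigned int i = 0; i < vel_fe.dofs_per_cell; ++i)
                  // test function times the 2d scalar curl du_y/dx - du_x/dy
                  local_rhs(i) += fe_values.shape_value(i, q) *
                                  (grad_uy[q][0] - grad_ux[q][1]) *
                                  fe_values.JxW(q);
              cell->get_dof_indices(dof_indices);
              rhs.add(dof_indices, local_rhs);
            }

          // Solve M w = rhs for the nodal values of the vorticity.
          SolverControl control(1000, 1e-10 * rhs.l2_norm());
          SolverCG<>    cg(control);
          cg.solve(vel_mass_matrix, vorticity, rhs, PreconditionIdentity());
        }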

        template <int dim>
        void NavierStokesProjection<dim>::output_results(const unsigned int step)
        {
      /usr/share/doc/packages/dealii/doxygen/deal.II/step_36.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_36.html 2024-11-15 06:44:29.851675345 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_36.html 2024-11-15 06:44:29.851675345 +0000

      \begin{align*}
        -\Delta \Psi + V(\mathbf x) \Psi &= \varepsilon \Psi &&\text{in}\ \Omega\quad, \\
        \Psi &= 0 &&\text{on}\ \partial\Omega\quad.
      \end{align*}

      As a consequence, this particle can only exist in a certain number of eigenstates that correspond to the energy eigenvalues $\varepsilon_\ell$ admitted as solutions of this equation. The orthodox (Copenhagen) interpretation of quantum mechanics posits that, if a particle has energy $\varepsilon_\ell$, then the probability of finding it at location $\mathbf x$ is proportional to $|\Psi_\ell(\mathbf x)|^2$, where $\Psi_\ell$ is the eigenfunction that corresponds to this eigenvalue.

      In order to numerically find solutions to this equation, i.e. a set of pairs of eigenvalues/eigenfunctions, we use the usual finite element approach of multiplying the equation from the left with test functions, integrating by parts, and searching for solutions in finite dimensional spaces by approximating $\Psi(\mathbf x)\approx\Psi_h(\mathbf x)=\sum_{j}\phi_j(\mathbf x)\tilde\psi_j$, where $\tilde\psi$ is a vector of expansion coefficients. We then immediately arrive at the following equation that discretizes the continuous eigenvalue problem:

      \[
        A \tilde{\Psi} = \varepsilon_h M \tilde{\Psi} \quad,
      \]

      where $A$ is the stiffness matrix arising from the differential operator $L$, and $M$ is the mass matrix. The solution to the eigenvalue problem is an eigenspectrum $\varepsilon_{h,\ell}$, with associated eigenfunctions $\Psi_\ell=\sum_j \phi_j\tilde{\psi}_j$.
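      To make the structure of these two matrices concrete, a sketch of how $A$ (stiffness plus potential term) and $M$ could be assembled might read as follows; the names dof_handler, fe, constraints, stiffness_matrix, mass_matrix and the Function<dim> object potential are assumed to have been set up in the usual way and are not part of this excerpt:

        QGauss<dim>        quadrature(fe.degree + 1);
        FEValues<dim>      fe_values(fe, quadrature,
                                     update_values | update_gradients |
                                     update_quadrature_points | update_JxW_values);
        const unsigned int dofs_per_cell = fe.dofs_per_cell;
        FullMatrix<double> cell_A(dofs_per_cell, dofs_per_cell);
        FullMatrix<double> cell_M(dofs_per_cell, dofs_per_cell);
        std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

        for (const auto &cell : dof_handler.active_cell_iterators())
          {
            fe_values.reinit(cell);
            cell_A = 0;
            cell_M = 0;
            for (unsigned int q = 0; q < quadrature.size(); ++q)
              {
                const double V_q = potential.value(fe_values.quadrature_point(q));
                for (unsigned int i = 0; i < dofs_per_cell; ++i)
                  for (unsigned int j = 0; j < dofs_per_cell; ++j)
                    {
                      // grad phi_i . grad phi_j + V phi_i phi_j
                      cell_A(i, j) += (fe_values.shape_grad(i, q) *
                                         fe_values.shape_grad(j, q) +
                                       V_q * fe_values.shape_value(i, q) *
                                         fe_values.shape_value(j, q)) *
                                      fe_values.JxW(q);
                      // phi_i phi_j
                      cell_M(i, j) += fe_values.shape_value(i, q) *
                                      fe_values.shape_value(j, q) *
                                      fe_values.JxW(q);
                    }
              }
            cell->get_dof_indices(local_dof_indices);
            constraints.distribute_local_to_global(cell_A, local_dof_indices,
                                                   stiffness_matrix);
            constraints.distribute_local_to_global(cell_M, local_dof_indices,
                                                   mass_matrix);
          }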

      Eigenvalues and Dirichlet boundary conditions

      In this program, we use Dirichlet boundary conditions for the wave function $\Psi$. What this means, from the perspective of a finite element code, is that only the interior degrees of freedom are real degrees of freedom: the ones on the boundary are not free but are forced to have a zero value, after all. On the other hand, the finite element method gains much of its power and simplicity from the fact that we just do the same thing on every cell, without having to think too much about where a cell is, whether it bounds on a less refined cell and consequently has a hanging node, or is adjacent to the boundary. All such checks would make the assembly of finite element linear systems unbearably difficult to write and even more so to read.

      Consequently, of course, when you distribute degrees of freedom with your DoFHandler object, you don't care whether some of the degrees of freedom you enumerate are at a Dirichlet boundary. They all get numbers. We just have to take care of these degrees of freedom at a later time when we apply boundary values. There are two basic ways of doing this (either using MatrixTools::apply_boundary_values() after assembling the linear system, or using AffineConstraints::distribute_local_to_global() during assembly; see the constraints topic for more information), but both result in the same: a linear system that has a total number of rows equal to the number of all degrees of freedom, including those that lie on the boundary. However, degrees of freedom that are constrained by Dirichlet conditions are separated from the rest of the linear system by zeroing out the corresponding row and column, putting a single positive entry on the diagonal, and the corresponding Dirichlet value on the right hand side.
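      For reference, the first of these two routes might look like the following sketch (with an assumed boundary indicator 0 and homogeneous boundary values; system_matrix, solution and system_rhs are the usual objects of a linear-system solve):

        std::map<types::global_dof_index, double> boundary_values;
        VectorTools::interpolate_boundary_values(dof_handler,
                                                 0,
                                                 Functions::ZeroFunction<dim>(),
                                                 boundary_values);
        // Zero out rows/columns of constrained DoFs and fix their values.
        MatrixTools::apply_boundary_values(boundary_values,
                                           system_matrix,
                                           solution,
                                           system_rhs);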

      If you assume for a moment that we had renumbered degrees of freedom in such a way that all of those on the Dirichlet boundary come last, then the linear system we would get when solving a regular PDE with a right hand side would look like this:

      \begin{align*}
        \begin{pmatrix}
          A_i & 0 \\ 0 & D_b
        \end{pmatrix}
        \begin{pmatrix}
          U_i \\ U_b
        \end{pmatrix}
        =
        \begin{pmatrix}
          F_i \\ F_b
        \end{pmatrix}.
      \end{align*}

      Here, subscripts $i$ and $b$ correspond to interior and boundary degrees of freedom, respectively. The interior degrees of freedom satisfy the linear system $A_i U_i=F_i$ which yields the correct solution in the interior, and boundary values are determined by $U_b = D_b^{-1} F_b$ where $D_b$ is a diagonal matrix that results from the process of eliminating boundary degrees of freedom, and $F_b$ is chosen in such a way that $U_{b,j}=D_{b,jj}^{-1} F_{b,j}$ has the correct boundary values for every boundary degree of freedom $j$. (For the curious, the entries of the matrix $D_b$ result from adding modified local contributions to the global matrix where for the local matrices the diagonal elements, if non-zero, are set to their absolute value; otherwise, they are set to the average of absolute values of the diagonal. This process guarantees that the entries of $D_b$ are positive and of a size comparable to the rest of the diagonal entries, ensuring that the resulting matrix does not incur unreasonable losses of accuracy due to roundoff involving matrix entries of drastically different size. The actual values that end up on the diagonal are difficult to predict and you should treat them as arbitrary and unpredictable, but positive.)

      For "regular" linear systems, this all leads to the correct solution. On the other hand, for eigenvalue problems, this is not so trivial. There, eliminating boundary values affects both matrices $A$ and $M$ that we will solve with in the current tutorial program. After elimination of boundary values, we then receive an eigenvalue problem that can be partitioned like this:

      \begin{align*}
        \begin{pmatrix}
          A_i & 0 \\ 0 & D_A
        \end{pmatrix}
        \begin{pmatrix}
          \tilde\Psi_i \\ \tilde\Psi_b
        \end{pmatrix}
        =
        \epsilon_h
        \begin{pmatrix}
          M_i & 0 \\ 0 & D_M
        \end{pmatrix}
        \begin{pmatrix}
          \tilde\Psi_i \\ \tilde\Psi_b
        \end{pmatrix}.
      \end{align*}

      The interior block yields the eigenvalue problem we are actually interested in, while the boundary rows decouple from it and read

      \[
        D_A \tilde \Psi_b = \epsilon_h D_M \tilde \Psi_b.
      \]

      These eigenvalues are spurious since they result from an eigenvalue system that operates only on boundary nodes – nodes that are not real degrees of freedom. Of course, since the two matrices $D_A,D_M$ are diagonal, we can exactly quantify these spurious eigenvalues: they are $\varepsilon_{h,j}=D_{A,jj}/D_{M,jj}$ (where the index $j$ corresponds exactly to the degrees of freedom that are constrained by Dirichlet boundary values).

      So how does one deal with them? The first part is to recognize when our eigenvalue solver finds one of them. To this end, the program computes and prints an interval within which these eigenvalues lie, by computing the minimum and maximum of the expression $\varepsilon_{h,j}=D_{A,jj}/D_{M,jj}$ over all constrained degrees of freedom. In the program below, this already suffices: we find that this interval lies outside the set of smallest eigenvalues and corresponding eigenfunctions we are interested in and compute, so there is nothing we need to do here.
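      A sketch of this interval computation, assuming the matrix diagonals can be read through operator() as for the matrix classes used here, might look like:

        double min_spurious = std::numeric_limits<double>::max(),
               max_spurious = std::numeric_limits<double>::lowest();
        for (unsigned int i = 0; i < dof_handler.n_dofs(); ++i)
          if (constraints.is_constrained(i))
            {
              // D_A,jj / D_M,jj for this constrained degree of freedom
              const double ev = stiffness_matrix(i, i) / mass_matrix(i, i);
              min_spurious = std::min(min_spurious, ev);
              max_spurious = std::max(max_spurious, ev);
            }
        std::cout << "Spurious eigenvalues are all in the interval ["
                  << min_spurious << ',' << max_spurious << ']' << std::endl;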

      On the other hand, it may happen that we find that one of the eigenvalues we compute in this program happens to be in this interval, and in that case we would not know immediately whether it is a spurious or a true eigenvalue. In that case, one could simply scale the diagonal elements of either matrix after computing the two matrices, thus shifting them away from the frequency of interest in the eigen-spectrum. This can be done by using the following code, making sure that all spurious eigenvalues are exactly equal to $1.234\cdot 10^5$:

      for (unsigned int i = 0; i < dof_handler.n_dofs(); ++i)
        if (constraints.is_constrained(i))
          {
            stiffness_matrix.set(i, i, 1.234e5);
            mass_matrix.set(i, i, 1);
          }
        eigensolver.solve(stiffness_matrix,
                          mass_matrix,
                          eigenvalues,
                          eigenfunctions,
                          eigenfunctions.size());
      The output of the call above is a set of vectors and values. In eigenvalue problems, the eigenfunctions are only determined up to a constant that can be fixed pretty arbitrarily. Knowing nothing about the origin of the eigenvalue problem, SLEPc has no other choice than to normalize the eigenvectors to one in the $l_2$ (vector) norm. Unfortunately this norm has little to do with any norm we may be interested in from an eigenfunction perspective: the $L_2(\Omega)$ norm, or maybe the $L_\infty(\Omega)$ norm.

      Let us choose the latter and rescale eigenfunctions so that they have $\|\phi_i(\mathbf x)\|_{L^\infty(\Omega)}=1$ instead of $\|\Phi\|_{l_2}=1$ (where $\phi_i$ is the $i$th eigenfunction and $\Phi_i$ the corresponding vector of nodal values). For the $Q_1$ elements chosen here, we know that the maximum of the function $\phi_i(\mathbf x)$ is attained at one of the nodes, so $\max_{\mathbf x}\phi_i(\mathbf x)=\max_j (\Phi_i)_j$, making the normalization in the $L_\infty$ norm trivial. Note that this doesn't work as easily if we had chosen $Q_k$ elements with $k>1$: there, the maximum of a function does not necessarily have to be attained at a node, and so $\max_{\mathbf x}\phi_i(\mathbf x)\ge\max_j (\Phi_i)_j$ (although the equality is usually nearly true).

        for (auto &eigenfunction : eigenfunctions)
        eigenfunction /= eigenfunction.linfty_norm();
       
      set Global mesh refinement steps = 5
      set Number of eigenvalues/eigenfunctions = 5
      set Potential = 0
      Here, the potential is zero inside the domain, and we know that the eigenvalues are given by $\lambda_{(mn)}=\frac{\pi^2}{4}(m^2+n^2)$ where $m,n\in{\mathbb N^+}$. Eigenfunctions are sines and cosines with $m$ and $n$ periods in $x$ and $y$ directions. This matches the output our program generates:

      examples/step-36> make run
      ============================ Running step-36
      Number of active cells: 1024
      Number of degrees of freedom: 1089

    • In our derivation of the problem we have assumed that the particle is confined to a domain $\Omega$ and that at the boundary of this domain its probability $|\Psi|^2$ of being present is zero. This is equivalent to solving the eigenvalue problem on all of ${\mathbb R}^d$ and assuming that the energy potential is finite only inside a region $\Omega$ and infinite outside. It is relatively easy to show that $|\Psi(\mathbf x)|^2=0$ at all locations $\mathbf x$ where $V(\mathbf x)=\infty$. So the question is what happens if our potential is not of this form, i.e. there is no bounded domain outside of which the potential is infinite? In that case, it may be worth just considering a very large domain at the boundary of which $V(\mathbf x)$ is at least very large, if not infinite. Play around with a few cases like this and explore how the spectrum and eigenfunctions change as we make the computational region larger and larger.


    • The plots above show the wave function $\Psi(\mathbf x)$, but the physical quantity of interest is actually the probability density $|\Psi(\mathbf x)|^2$ for the particle to be at location $\mathbf x$. Some visualization programs can compute derived quantities from the data in an input file, but we can also do so right away when creating the output file. The facility to do that is the DataPostprocessor class that can be used in conjunction with the DataOut class. Examples of how this can be done can be found in step-29 and step-33.

      /usr/share/doc/packages/dealii/doxygen/deal.II/step_37.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_37.html 2024-11-15 06:44:29.931676060 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_37.html 2024-11-15 06:44:29.931676060 +0000

      The major motivation for matrix-free methods is the fact that on today's processors access to main memory (i.e., for objects that do not fit in the caches) has become the bottleneck in many solvers for partial differential equations: To perform a matrix-vector product based on matrices, modern CPUs spend far more time waiting for data to arrive from memory than on actually doing the floating point multiplications and additions. Thus, if we could substitute looking up matrix elements in memory by re-computing them — or rather, the operator represented by these entries —, we may win in terms of overall run-time even if this requires a significant number of additional floating point operations. That said, to realize this with a trivial implementation is not enough and one needs to really look at the details to gain in performance. This tutorial program and the papers referenced above show how one can implement such a scheme and demonstrate the speedup that can be obtained.

      The test case

      In this example, we consider the Poisson problem

      \begin{eqnarray*}
        - \nabla \cdot a(\mathbf x) \nabla u &=& 1, \\
        u &=& 0 \quad \text{on}\ \partial \Omega
      \end{eqnarray*}

      where $a(\mathbf x)$ is a variable coefficient. Below, we explain how to implement a matrix-vector product for this problem without explicitly forming the matrix. The construction can, of course, be done in a similar way for other equations as well.

      We choose as domain $\Omega=[0,1]^3$ and $a(\mathbf x)=\frac{1}{0.05 + 2\|\mathbf x\|^2}$. Since the coefficient is symmetric around the origin but the domain is not, we will end up with a non-symmetric solution.

      Matrix-vector product implementation

      In order to find out how we can write a code that performs a matrix-vector product, but does not need to store the matrix elements, let us start by looking at how a finite element matrix $A$ is assembled:

      \begin{eqnarray*}
        A = \sum_{\mathrm{cell}=1}^{\mathrm{n\_cells}}
        P_{\mathrm{cell,{loc-glob}}}^T A_{\mathrm{cell}} P_{\mathrm{cell,{loc-glob}}}.
      \end{eqnarray*}

      In this formula, the matrix $P_{\mathrm{cell,loc-glob}}$ is a rectangular matrix that defines the index mapping from local degrees of freedom in the current cell to the global degrees of freedom. The information from which this operator can be built is usually encoded in the local_dof_indices variable and is used in the assembly calls filling matrices in deal.II. Here, $A_\mathrm{cell}$ denotes the cell matrix associated with $A$.

      If we are to perform a matrix-vector product, we can hence use that

      \begin{eqnarray*}
        y &=& A\cdot u = \left(\sum_{\mathrm{cell}=1}^{\mathrm{n\_cells}} P_\mathrm{cell,{loc-glob}}^T
        A_\mathrm{cell} P_\mathrm{cell,{loc-glob}}\right) \cdot u
        \\
        &=& \sum_{\mathrm{cell}=1}^{\mathrm{n\_cells}} P_\mathrm{cell,{loc-glob}}^T
        A_\mathrm{cell} u_\mathrm{cell}
        \\
        &=& \sum_{\mathrm{cell}=1}^{\mathrm{n\_cells}} P_\mathrm{cell,{loc-glob}}^T
        v_\mathrm{cell},
      \end{eqnarray*}

      where $u_\mathrm{cell}$ are the values of $u$ at the degrees of freedom of the respective cell, and $v_\mathrm{cell}=A_\mathrm{cell} u_\mathrm{cell}$ correspondingly for the result. A naive attempt to implement the local action of the Laplacian would hence be to use the following code:

      Matrixfree<dim>::vmult (Vector<double>       &dst,
                              const Vector<double> &src) const
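      The diff elides the body of this function; a sketch of what such a naive implementation might look like (assuming the usual dof_handler, fe and quadrature objects, and ignoring constraints as the text notes) is:

        {
          dst = 0;
          QGauss<dim>        quadrature_formula(fe.degree + 1);
          FEValues<dim>      fe_values(fe, quadrature_formula,
                                       update_gradients | update_JxW_values);
          const unsigned int dofs_per_cell = fe.dofs_per_cell;
          const unsigned int n_q_points    = quadrature_formula.size();
          FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
          Vector<double>     cell_src(dofs_per_cell), cell_dst(dofs_per_cell);
          std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

          for (const auto &cell : dof_handler.active_cell_iterators())
            {
              // Form the local matrix A_cell as in the usual assembly ...
              fe_values.reinit(cell);
              cell_matrix = 0;
              for (unsigned int q = 0; q < n_q_points; ++q)
                for (unsigned int i = 0; i < dofs_per_cell; ++i)
                  for (unsigned int j = 0; j < dofs_per_cell; ++j)
                    cell_matrix(i, j) += fe_values.shape_grad(i, q) *
                                         fe_values.shape_grad(j, q) *
                                         fe_values.JxW(q);

              // ... extract u_cell (the action of P_cell,loc-glob) ...
              cell->get_dof_indices(local_dof_indices);
              for (unsigned int i = 0; i < dofs_per_cell; ++i)
                cell_src(i) = src(local_dof_indices[i]);

              // ... v_cell = A_cell u_cell ...
              cell_matrix.vmult(cell_dst, cell_src);

              // ... and accumulate into dst (the action of P^T).
              for (unsigned int i = 0; i < dofs_per_cell; ++i)
                dst(local_dof_indices[i]) += cell_dst(i);
            }
        }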

      Here we neglected boundary conditions as well as any hanging nodes we may have, though neither would be very difficult to include using the AffineConstraints class. Note how we first generate the local matrix in the usual way as a sum over all quadrature points for each local matrix entry. To form the actual product as expressed in the above formula, we extract the values of src of the cell-related degrees of freedom (the action of $P_\mathrm{cell,{loc-glob}}$), multiply by the local matrix (the action of $A_\mathrm{cell}$), and finally add the result to the destination vector dst (the action of $P_\mathrm{cell,{loc-glob}}^T$, added over all the elements). It is not more difficult than that, in principle.

      While this code is completely correct, it is very slow. For every cell, we generate a local matrix, which takes three nested loops with loop length equal to the number of local degrees of freedom to compute. The multiplication itself is then done by two nested loops, which means that it is much cheaper.

      One way to improve this is to realize that conceptually the local matrix can be thought of as the product of three matrices,

      \begin{eqnarray*}
        A_\mathrm{cell} = B_\mathrm{cell}^T D_\mathrm{cell} B_\mathrm{cell},
      \end{eqnarray*}

      where for the example of the Laplace operator the $(q*\mathrm{dim}+d,i)$-th element of $B_\mathrm{cell}$ is given by fe_values.shape_grad(i,q)[d]. This matrix consists of dim*n_q_points rows and dofs_per_cell columns. The matrix $D_\mathrm{cell}$ is diagonal and contains the values fe_values.JxW(q) * coefficient_values[q] (or, rather, dim copies of each of these values). This kind of representation of finite element matrices can often be found in the engineering literature.
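      Spelled out as code, filling such a $B_\mathrm{cell}$ explicitly might look like this sketch (n_q_points, dofs_per_cell and fe_values as in the naive implementation above):

        FullMatrix<double> B_cell(dim * n_q_points, dofs_per_cell);
        for (unsigned int q = 0; q < n_q_points; ++q)
          for (unsigned int d = 0; d < dim; ++d)
            for (unsigned int i = 0; i < dofs_per_cell; ++i)
              // row q*dim+d holds the d-th gradient component at point q
              B_cell(q * dim + d, i) = fe_values.shape_grad(i, q)[d];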

      When the cell matrix is applied to a vector,

      \begin{eqnarray*}
        A_\mathrm{cell}\cdot u_\mathrm{cell} = B_\mathrm{cell}^T
        D_\mathrm{cell} B_\mathrm{cell} \cdot u_\mathrm{cell},
      \end{eqnarray*}

      one would then not form the matrix-matrix products, but rather multiply one matrix at a time with a vector from right to left so that only three successive matrix-vector products are formed. This approach removes the three nested loops in the calculation of the local matrix, which reduces the complexity of the work on one cell from something like $\mathcal{O}(\mathrm{dofs\_per\_cell}^3)$ to $\mathcal{O}(\mathrm{dofs\_per\_cell}^2)$. An interpretation of this algorithm is that we first transform the vector of values on the local DoFs to a vector of gradients on the quadrature points. In the second loop, we multiply these gradients by the integration weight and the coefficient. The third loop applies the second gradient (in transposed form), so that we get back to a vector of (Laplacian) values on the cell dofs.
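      In code, and with $B_\mathrm{cell}$ stored as in the sketch above and the diagonal of $D_\mathrm{cell}$ in an assumed Vector<double> D_diagonal, the right-to-left evaluation is just (illustrative, not the tutorial's code):

        Vector<double> tmp(dim * n_q_points);
        B_cell.vmult(tmp, cell_src);   // gradients on the quadrature points
        for (unsigned int k = 0; k < tmp.size(); ++k)
          tmp(k) *= D_diagonal(k);     // multiply by JxW(q) * coefficient[q]
        B_cell.Tvmult(cell_dst, tmp);  // test by gradients, sum over q-points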

      The bottleneck in the above code is the operations done by the call to FEValues::reinit for every cell, which take about as much time as the other steps together (at least if the mesh is unstructured; deal.II can recognize that the gradients are often unchanged on structured meshes). That is certainly not ideal and we would like to do better than this. What the reinit function does is to calculate the gradient in real space by transforming the gradient on the reference cell using the Jacobian of the transformation from real to reference cell. This is done for each basis function on the cell, for each quadrature point. The Jacobian does not depend on the basis function, but it is different on different quadrature points in general. If you only build the matrix once as we've done in all previous tutorial programs, there is nothing to be optimized since FEValues::reinit needs to be called on every cell. In this process, the transformation is applied while computing the local matrix elements.

      In a matrix-free implementation, however, we will compute those integrals very often because iterative solvers will apply the matrix many times during the solution process. Therefore, we need to think about whether we may be able to cache some data that gets reused in the operator applications, i.e., integral computations. On the other hand, we realize that we must not cache too much data since otherwise we get back to the situation where memory access becomes the dominating factor. Therefore, we will not store the transformed gradients in the matrix B, as they would in general be different for each basis function and each quadrature point on every element for curved meshes.

      The trick is to factor out the Jacobian transformation and first apply the gradient on the reference cell only. This operation interpolates the vector of values on the local dofs to a vector of (unit-coordinate) gradients on the quadrature points. There, we first apply the Jacobian that we factored out from the gradient, then apply the weights of the quadrature, and finally apply the transposed Jacobian for preparing the third loop which tests by the gradients on the unit cell and sums over quadrature points.

      Let us again write this in terms of matrices. Let the matrix $B_\mathrm{cell}$ denote the cell-related gradient matrix, with each row containing the values on the quadrature points. It is constructed by a matrix-matrix product as

      \begin{eqnarray*}
        B_\mathrm{cell} = J_\mathrm{cell}^{-\mathrm T} B_\mathrm{ref\_cell},
      \end{eqnarray*}

      where $B_\mathrm{ref\_cell}$ denotes the gradient on the reference cell and $J_\mathrm{cell}^{-\mathrm T}$ denotes the inverse transpose Jacobian of the transformation from unit to real cell (in the language of transformations, the operation represented by $J_\mathrm{cell}^{-\mathrm T}$ is a covariant transformation). $J_\mathrm{cell}^{-\mathrm T}$ is block-diagonal, and the block size is equal to the dimension of the problem. Each diagonal block is the Jacobian transformation that goes from the reference cell to the real cell.

      Putting things together, we find that

      \begin{eqnarray*}
        A_\mathrm{cell} = B_\mathrm{cell}^T D B_\mathrm{cell}
                        = B_\mathrm{ref\_cell}^T J_\mathrm{cell}^{-1}
                          D_\mathrm{cell}
                          J_\mathrm{cell}^{-\mathrm T} B_\mathrm{ref\_cell},
      \end{eqnarray*}

      so we calculate the product (starting the local product from the right)

      \begin{eqnarray*}
        v_\mathrm{cell} = B_\mathrm{ref\_cell}^T J_\mathrm{cell}^{-1} D J_\mathrm{cell}^{-\mathrm T}
        B_\mathrm{ref\_cell} u_\mathrm{cell}, \quad
        v = \sum_{\mathrm{cell}=1}^{\mathrm{n\_cells}} P_\mathrm{cell,{loc-glob}}^T
        v_\mathrm{cell}.
      \end{eqnarray*}

      FEValues<dim> fe_values_reference (fe, quadrature_formula,
                                         update_gradients);
      Triangulation<dim> reference_cell;
      GridGenerator::hyper_cube(reference_cell, 0., 1.);
      fe_values_reference.reinit(reference_cell.begin());
      Note how we create an additional FEValues object for the reference cell gradients and how we initialize it to the reference cell. The actual derivative data is then applied by the inverse, transposed Jacobians (deal.II calls the Jacobian matrix from real to unit cell inverse_jacobian, as the forward transformation is from unit to real cell). The factor $J_\mathrm{cell}^{-1} D_\mathrm{cell} J_\mathrm{cell}^{-\mathrm T}$ is block-diagonal over quadrature. In this form, one realizes that variable coefficients (possibly expressed through a tensor) and general grid topologies with Jacobian transformations have a similar effect on the coefficient transforming the unit-cell derivatives.

      At this point, one might wonder why we store the matrix $J_\mathrm{cell}^{-\mathrm T}$ and the coefficient separately, rather than only the complete factor $J_\mathrm{cell}^{-1} D_\mathrm{cell} J_\mathrm{cell}^{-\mathrm T}$. The latter would use less memory because the tensor is symmetric with six independent values in 3D, whereas for the former we would need nine entries for the inverse transposed Jacobian, one for the quadrature weight and Jacobian determinant, and one for the coefficient, totaling to 11 doubles. The reason is that the former approach allows for implementing generic differential operators through a common framework of cached data, whereas the latter specifically stores the coefficient for the Laplacian. In case applications demand it, this specialization could pay off and would be worthwhile to consider. Note that the implementation in deal.II is smart enough to detect Cartesian or affine geometries where the Jacobian is constant throughout the cell and needs not be stored for every cell (and indeed often is the same over different cells as well).

      The final optimization that is most crucial from an operation count point of view is to make use of the tensor product structure in the basis functions. This is possible because we have factored out the gradient from the reference cell operation described by $B_\mathrm{ref\_cell}$, i.e., an interpolation operation over the completely regular data fields of the reference cell. We illustrate the process of complexity reduction in two space dimensions, but the same technique can be used in higher dimensions. On the reference cell, the basis functions are of the tensor product form $\phi(x,y) = \varphi_i(x) \varphi_j(y)$. The part of the matrix $B_\mathrm{ref\_cell}$ that computes the first component has the form $B_\mathrm{sub\_cell}^x = B_\mathrm{grad,x} \otimes B_\mathrm{val,y}$, where $B_\mathrm{grad,x}$ and $B_\mathrm{val,y}$ contain the evaluation of all the 1D basis functions on all the 1D quadrature points. Forming a matrix $U$ with $U(j,i)$ containing the coefficient belonging to basis function $\varphi_i(x) \varphi_j(y)$, we get $(B_\mathrm{grad,x} \otimes B_\mathrm{val,y})u_\mathrm{cell} = B_\mathrm{val,y} U B_\mathrm{grad,x}$. This reduces the complexity for computing this product from $p^4$ to $2 p^3$, where $p-1$ is the degree of the finite element (i.e., equivalently, $p$ is the number of shape functions in each coordinate direction), or $p^{2d}$ to $d p^{d+1}$ in general. The reason why we look at the complexity in terms of the polynomial degree is that we want to be able to go to high degrees and possibly increase the polynomial degree $p$ instead of the grid resolution. Good algorithms for moderate degrees like the ones used here are linear in the polynomial degree independent of the dimension, as opposed to matrix-based schemes or naive evaluation through FEValues. The techniques used in the implementations of deal.II have been established in the spectral element community since the 1980s.
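      As a small illustration of this identity (a sketch only; the FullMatrix sizes and the transpose convention are assumptions made for readability, and real kernels work in-place on flat arrays rather than with FullMatrix objects):

        // B_val, B_grad: n_q x p matrices of 1d shape values / gradients
        // on the 1d quadrature points; U: p x p matrix of cell coefficients.
        const unsigned int p = 3, n_q = 3;
        FullMatrix<double> B_val(n_q, p), B_grad(n_q, p), U(p, p);
        // ... fill B_val, B_grad from the 1d basis and U from u_cell ...
        FullMatrix<double> tmp(n_q, p), grad_x(n_q, n_q);
        B_val.mmult(tmp, U);         // tmp = B_val,y * U: interpolate in y
        tmp.mTmult(grad_x, B_grad);  // grad_x = tmp * B_grad,x^T: differentiate in x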

      Implementing a matrix-free and cell-based finite element operator requires a somewhat different program design as compared to the usual matrix assembly codes shown in previous tutorial programs. The data structures for doing this are the MatrixFree class that collects all data and issues a (parallel) loop over all cells and the FEEvaluation class that evaluates finite element basis functions by making use of the tensor product structure.

      The implementation of the matrix-free matrix-vector product shown in this tutorial is slower than a matrix-vector product using a sparse matrix for linear elements, but faster for all higher order elements thanks to the reduced complexity due to the tensor product structure and due to less memory transfer during computations. The impact of reduced memory transfer is particularly beneficial when working on a multicore processor where several processing units share access to memory. In that case, an algorithm which is computation bound will show almost perfect parallel speedup (apart from possible changes of the processor's clock frequency through turbo modes depending on how many cores are active), whereas an algorithm that is bound by memory transfer might not achieve similar speedup (even when the work is perfectly parallel and one could expect perfect scaling like in sparse matrix-vector products). An additional gain with this implementation is that we do not have to build the sparse matrix itself, which can also be quite expensive depending on the underlying differential equation. Moreover, the above framework is simple to generalize to nonlinear operations, as we demonstrate in step-48.

      Combination with multigrid

      To be efficient, the operations performed in the matrix-free implementation require knowledge of loop lengths at compile time, which are given by the degree of the finite element. Hence, we collect the values of the two template parameters that can be changed at one place in the code. Of course, one could make the degree of the finite element a run-time parameter by compiling the computational kernels for all degrees that are likely (say, between 1 and 6) and selecting the appropriate kernel at run time. Here, we simply choose second order $Q_2$ elements and choose dimension 3 as standard.

        const unsigned int degree_finite_element = 2;
        const unsigned int dimension = 3;
       
       

      Equation data

      We define a variable coefficient function for the Poisson problem. It is similar to the function in step-5 but we use the form $a(\mathbf x)=\frac{1}{0.05 + 2\|\mathbf x\|^2}$ instead of a discontinuous one. It is merely to demonstrate the possibilities of this implementation, rather than making much sense physically. We define the coefficient in the same way as functions in earlier tutorial programs. There is one new function, namely a value method with template argument number.

        template <int dim>
        class Coefficient : public Function<dim>
        {
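          // (Sketch continuing this excerpt: a templated value() of the kind
          //  described above. The body is an assumption, chosen to match the
          //  coefficient a(x) = 1/(0.05 + 2|x|^2) quoted in the text; the
          //  template parameter number stands for double as well as
          //  VectorizedArray<double>, so the same code can be used in the
          //  vectorized matrix-free kernels.)
        public:
          template <typename number>
          number value(const Point<dim, number> &p,
                       const unsigned int /*component*/ = 0) const
          {
            return 1. / (0.05 + 2. * p.square());
          }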
    • Tell the FEEvaluation object the (macro) cell we want to work on.
    • Read in the values of the source vectors (read_dof_values), including the resolution of constraints. This stores $u_\mathrm{cell}$ as described in the introduction.
    • Compute the unit-cell gradient (the evaluation of finite element functions). Since FEEvaluation can combine value computations with gradient computations, it uses a unified interface to all kinds of derivatives of order between zero and two. We only want gradients, no values and no second derivatives, so we set the function arguments to true in the gradient slot (second slot), and to false in the values slot (first slot). There is also a third slot for the Hessian which is false by default, so it needs not be given. Note that the FEEvaluation class internally evaluates shape functions in an efficient way where one dimension is worked on at a time (using the tensor product form of shape functions and quadrature points as mentioned in the introduction). This gives complexity equal to $\mathcal O(d^2 (p+1)^{d+1})$ for polynomial degree $p$ in $d$ dimensions, compared to the naive approach with loops over all local degrees of freedom and quadrature points that is used in FEValues and costs $\mathcal O(d (p+1)^{2d})$.
    • Next comes the application of the Jacobian transformation, the multiplication by the variable coefficient and the quadrature weight. FEEvaluation has an access function get_gradient that applies the Jacobian and returns the gradient in real space. Then, we just need to multiply by the (scalar) coefficient, and let the function submit_gradient apply the second Jacobian (for the test function) and the quadrature weight and Jacobian determinant (JxW). Note that the submitted gradient is stored in the same data field as where it is read from in get_gradient. Therefore, you need to make sure to not read from the same quadrature point again after having called submit_gradient on that particular quadrature point. In general, it is a good idea to copy the result of get_gradient when it is used more often than once.
    • Next follows the summation over quadrature points for all test functions that corresponds to the actual integration step. For the Laplace operator, we just multiply by the gradient, so we call the integrate function with the respective argument set. If you have an equation where you test by both the values of the test functions and the gradients, both template arguments need to be set to true. Calling first the integrate function for values and then gradients in a separate call leads to wrong results, since the second call will internally overwrite the results from the first call. Note that there is no function argument for second derivatives in the integrate step.
    • Eventually, the local contributions in the vector $v_\mathrm{cell}$ as mentioned in the introduction need to be added into the result vector (and constraints are applied). This is done with a call to distribute_local_to_global, the same name as the corresponding function in the AffineConstraints (only that we now store the local vector in the FEEvaluation object, as are the indices between local and global degrees of freedom).
        template <int dim, int fe_degree, typename number>
        void LaplaceOperator<dim, fe_degree, number>::local_apply(
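          // (Sketch of the elided body, following the steps in the list above.
          //  The argument list, the FEEvaluation template parameters and the
          //  member 'coefficient' are assumptions, close to but not verbatim
          //  the tutorial's code.)
          const MatrixFree<dim, number>                    &data,
          LinearAlgebra::distributed::Vector<number>       &dst,
          const LinearAlgebra::distributed::Vector<number> &src,
          const std::pair<unsigned int, unsigned int>      &cell_range) const
        {
          FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> phi(data);
          for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
            {
              phi.reinit(cell);                          // select the (macro) cell
              phi.read_dof_values(src);                  // u_cell, resolving constraints
              phi.evaluate(EvaluationFlags::gradients);  // unit-cell gradients
              for (unsigned int q = 0; q < phi.n_q_points; ++q)
                phi.submit_gradient(coefficient(cell, q) * phi.get_gradient(q), q);
              phi.integrate(EvaluationFlags::gradients); // test by gradients
              phi.distribute_local_to_global(dst);       // add v_cell into dst
            }
        }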

      In the local compute loop, we compute the diagonal by a loop over all columns in the local matrix and putting the entry 1 in the ith slot and a zero entry in all other slots, i.e., we apply the cell-wise differential operator on one unit vector at a time. The inner part invoking FEEvaluation::evaluate(), the loop over quadrature points, and FEEvaluation::integrate(), is exactly the same as in the local_apply function. Afterwards, we pick out the ith entry of the local result and put it into a temporary storage (as we overwrite all entries in the array behind FEEvaluation::get_dof_value() with the next loop iteration). Finally, the temporary storage is written to the destination vector. Note how we use FEEvaluation::get_dof_value() and FEEvaluation::submit_dof_value() to read and write to the data field that FEEvaluation uses for the integration on the one hand and writes into the global vector on the other hand.

      Given that we are only interested in the matrix diagonal, we simply throw away all other entries of the local matrix that have been computed along the way. While it might seem wasteful to compute the complete cell matrix and then throw away everything but the diagonal, the integration is so efficient that the computation does not take too much time. Note that the complexity of operator evaluation per element is $\mathcal O((p+1)^{d+1})$ for polynomial degree $p$, so computing the whole matrix costs us $\mathcal O((p+1)^{2d+1})$ operations, not too far away from the $\mathcal O((p+1)^{2d})$ complexity for computing the diagonal with FEValues. Since FEEvaluation is also considerably faster due to vectorization and other optimizations, the diagonal computation with this function is actually the fastest (simple) variant. (It would be possible to compute the diagonal with sum factorization techniques in $\mathcal O((p+1)^{d+1})$ operations involving specifically adapted kernels—but since such kernels are only useful in that particular context and the diagonal computation is typically not on the critical path, they have not been implemented in deal.II.)

      Note that the code that calls distribute_local_to_global on the vector to accumulate the diagonal entries into the global matrix has some limitations. For operators with hanging node constraints that distribute an integral contribution of a constrained DoF to several other entries inside the distribute_local_to_global call, the vector interface used here does not exactly compute the diagonal entries, but lumps some contributions located on the diagonal of the local matrix that would end up in an off-diagonal position of the global matrix to the diagonal. The result is correct up to discretization accuracy as explained in Kormann (2016), section 5.3, but not mathematically equal. In this tutorial program, no harm can happen because the diagonal is only used for the multigrid level matrices where no hanging node constraints appear.

        template <int dim, int fe_degree, typename number>
        void LaplaceOperator<dim, fe_degree, number>::local_compute_diagonal(
      As a smoother, this tutorial program uses a Chebyshev iteration instead of SOR in step-16. (SOR would be very difficult to implement because we do not have the matrix elements available explicitly, and it is difficult to make it work efficiently in parallel.) The smoother is initialized with our level matrices and the mandatory additional data for the Chebyshev smoother. We use a relatively high degree here (5), since matrix-vector products are comparably cheap. We choose to smooth out a range of $[1.2 \hat{\lambda}_{\max}/15,1.2 \hat{\lambda}_{\max}]$ in the smoother where $\hat{\lambda}_{\max}$ is an estimate of the largest eigenvalue (the factor 1.2 is applied inside PreconditionChebyshev). In order to compute that eigenvalue, the Chebyshev initialization performs a few steps of a CG algorithm without preconditioner. Since the highest eigenvalue is usually the easiest one to find and a rough estimate is enough, we choose 10 iterations. Finally, we also set the inner preconditioner type in the Chebyshev method which is a Jacobi iteration. This is represented by the DiagonalMatrix class that gets the inverse diagonal entry provided by our LaplaceOperator class.

      On level zero, we initialize the smoother differently because we want to use the Chebyshev iteration as a solver. PreconditionChebyshev allows the user to switch to solver mode where the number of iterations is internally chosen to the correct value. In the additional data object, this setting is activated by choosing the polynomial degree to numbers::invalid_unsigned_int. The algorithm will then attack all eigenvalues between the smallest and largest one in the coarse level matrix. The number of steps in the Chebyshev smoother are chosen such that the Chebyshev convergence estimates guarantee to reduce the residual by the number specified in the variable smoothing_range. Note that for solving, smoothing_range is a relative tolerance and chosen smaller than one; in this case, we select three orders of magnitude, whereas it is a number larger than 1 when only selected eigenvalues are smoothed.

      From a computational point of view, the Chebyshev iteration is a very attractive coarse grid solver as long as the coarse size is moderate. This is because the Chebyshev method performs only matrix-vector products and vector updates, which typically parallelize better on the largest cluster sizes (more than a few tens of thousands of cores) than the inner products involved in other iterative methods. The former involves only local communication between neighbors in the (coarse) mesh, whereas the latter requires global communication over all processors.

        using SmootherType =
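          // (sketch completing the elided declaration and the AdditionalData
          //  setup described in the text above; LevelMatrixType is an assumed
          //  name for the level operator type, and the numbers mirror the
          //  choices quoted in the prose)
          PreconditionChebyshev<LevelMatrixType,
                                LinearAlgebra::distributed::Vector<float>>;
        typename SmootherType::AdditionalData smoother_data;
        smoother_data.smoothing_range     = 15.; // smooth [lambda_max/15, lambda_max]
        smoother_data.degree              = 5;   // Chebyshev polynomial degree
        smoother_data.eig_cg_n_iterations = 10;  // CG steps to estimate lambda_max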

      Program output

      Since this example solves the same problem as step-5 (except for a different coefficient), there is little to say about the solution. We show a picture anyway, illustrating the size of the solution through both isocontours and volume rendering:

      Of more interest is to evaluate some aspects of the multigrid solver. When we run this program in 2D for quadratic ($Q_2$) elements, we get the following output (when run on one core in release mode):

      Vectorization over 2 doubles = 128 bits (SSE2)
      Cycle 0
      Number of degrees of freedom: 81
      Total setup time (wall) 0.00159788s
      Number of degrees of freedom: 2146689
      Total setup time (wall) 4.96491s
      Time solve (6 iterations) (CPU/wall) 3.53126s/3.56142s
      Since it is so easy, we look at what happens if we increase the polynomial degree. When selecting the degree as four in 3D, i.e., on $\mathcal Q_4$ elements, by changing the line const unsigned int degree_finite_element=4; at the top of the program, we get the following program output:

      Vectorization over 2 doubles = 128 bits (SSE2)
      Cycle 0
      Number of degrees of freedom: 729
      ...
      Number of degrees of freedom: 16974593
      Total setup time (wall) 27.8989s
      Time solve (7 iterations) (CPU/wall) 26.3705s/27.1077s
      Since $\mathcal Q_4$ elements on a certain mesh correspond to $\mathcal Q_2$ elements on half the mesh size, we can compare the run time at cycle 4 with fourth degree polynomials with cycle 5 using quadratic polynomials, both at 2.1 million degrees of freedom. The surprising effect is that the solver for $\mathcal Q_4$ elements is actually slightly faster than for the quadratic case, despite using one more linear iteration. The effect that higher-degree polynomials are similarly fast or even faster than lower degree ones is one of the main strengths of matrix-free operator evaluation through sum factorization, see the matrix-free paper. This is fundamentally different from matrix-based methods that get more expensive per unknown as the polynomial degree increases and the coupling gets denser.

      /usr/share/doc/packages/dealii/doxygen/deal.II/step_38.html differs (HTML document, UTF-8 Unicode text, with very long lines)

      This material is based upon work supported by the National Science Foundation under Grant No. DMS-0914977. Any opinions, findings and conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the National Science Foundation (NSF).

      Introduction

      In this example, we show how to solve a partial differential equation (PDE) on a codimension one surface $\Gamma \subset \mathbb R^3$ made of quadrilaterals, i.e. on a surface in 3d or a line in 2d. We focus on the following elliptic second order PDE

      \begin{align*}
       -\Delta_\Gamma u &= f \qquad \text{on } \Gamma,\\
       u &= g \qquad \text{on } \partial \Gamma,
      \end{align*}

      which generalizes the Laplace equation we have previously solved in several of the early tutorial programs. Our implementation is based on step-4. step-34 also solves problems on lower dimensional surfaces; however, there we only consider integral equations that do not involve derivatives of the solution variable, while here we actually have to investigate what it means to take derivatives of a function only defined on a (possibly curved) surface.

      In order to define the above operator, we start by introducing some notation. Let $\mathbf x_S:\hat S \rightarrow S$ be a parameterization of a surface $S$ from a reference element $\hat S \subset \mathbb R^2$, i.e. each point $\hat{\mathbf x}\in\hat S$ induces a point ${\mathbf x}_S(\hat{\mathbf x}) \in S$. Then let

      \[
       G_S \dealcoloneq (D \mathbf{x}_S)^T \ D \mathbf{x}_S
      \]

      denote the corresponding first fundamental form, where $D \mathbf{x}_S=\left(\frac{\partial x_{S,i}(\hat{\mathbf x})}{\partial \hat x_j}\right)_{ij}$ is the derivative (Jacobian) of the mapping. In the following, $S$ will be either the entire surface $\Gamma$ or, more conveniently for the finite element method, any face $S \in {\mathbb T}$, where ${\mathbb T}$ is a partition (triangulation) of $\Gamma$ consisting of quadrilaterals. We are now in a position to define the tangential gradient of a function $v : S \rightarrow \mathbb R$ by

      \[
       (\nabla_S v)\circ \mathbf x_S \dealcoloneq D \mathbf x_S \ G_S^{-1} \ \nabla (v \circ \mathbf x_S).
      \]

      The surface Laplacian (also called the Laplace-Beltrami operator) is then defined as $\Delta_S \dealcoloneq \nabla_S \cdot \nabla_S$. Note that an alternate way to compute the surface gradient on smooth surfaces $\Gamma$ is

      \[
       \nabla_S v = \nabla \tilde v - \mathbf n (\mathbf n \cdot \nabla \tilde v),
      \]

      where $\tilde v$ is a "smooth" extension of $v$ in a tubular neighborhood of $\Gamma$ and $\mathbf n$ is the normal of $\Gamma$. Since $\Delta_S = \nabla_S \cdot \nabla_S$, we deduce

      \[
       \Delta_S v = \Delta \tilde v - \mathbf n^T \ D^2 \tilde v \ \mathbf n - (\mathbf n \cdot \nabla \tilde v) (\nabla \cdot \mathbf n - \mathbf n^T \ D \mathbf n \ \mathbf n ).
      \]

      It is worth mentioning that the term $\nabla \cdot \mathbf n - \mathbf n \ D \mathbf n \ \mathbf n$ appearing in the above expression is the total curvature of the surface (sum of principal curvatures).

      As usual, we are only interested in weak solutions for which we can use $C^0$ finite elements (rather than requiring $C^1$ continuity as for strong solutions). We therefore resort to the weak formulation

      \[
       \int_\Gamma \nabla_\Gamma u \cdot \nabla_\Gamma v = \int_\Gamma f \ v \qquad \forall v \in H^1_0(\Gamma)
      \]

      and take advantage of the partition ${\mathbb T}$ to further write

      \[
       \sum_{K\in {\mathbb T}}\int_K \nabla_{K} u \cdot \nabla_{K} v = \sum_{K\in {\mathbb T}} \int_K f \ v \qquad \forall v \in H^1_0(\Gamma).
      \]

      Moreover, each integral in the above expression is computed in the reference element $\hat K \dealcoloneq [0,1]^2$ so that

      \begin{align*}
       \int_{K} \nabla_{K} u \cdot \nabla_{K} v
       &=
       \int_{\hat K} \nabla (u \circ \mathbf x_K)^T G_K^{-1} (D \mathbf x_K)^T D \mathbf x_K \ G_K^{-1} \nabla (v \circ \mathbf x_K) \sqrt{\det(G_K)}
       \\
       &=
       \int_{\hat K} \nabla (u \circ \mathbf x_K)^T G_K^{-1} \nabla (v \circ \mathbf x_K) \sqrt{\det(G_K)}
      \end{align*}

      and

      \[
       \int_{K} f \ v = \int_{\hat K} (f \circ \mathbf x_K) (v \circ \mathbf x_K) \sqrt{\det(G_K)}.
      \]

      Finally, we use a quadrature formula defined by points $\{p_l\}_{l=1}^N\subset \hat K$ and weights $\{w_l\}_{l=1}^N \subset \mathbb R^+_*$ to evaluate the above integrals and obtain

      \[
       \int_{K} \nabla_{K} u \cdot \nabla_{K} v \approx \sum_{l=1}^N (\nabla (u \circ \mathbf x_K)(p_l))^T G^{-1}(p_l) \nabla (v \circ \mathbf x_K)(p_l) \sqrt{\det (G(p_l))} \ w_l
      \]

      and

      \[
       \int_{K} f \ v \approx \sum_{l=1}^N (f \circ \mathbf x_K)(p_l) \ (v \circ \mathbf x_K)(p_l) \sqrt{\det (G(p_l))} \ w_l.
      \]

      Fortunately, deal.II already has all the tools to compute the above expressions. In fact, they barely differ from the ways in which we solve the usual Laplacian, only requiring the surface coordinate mapping to be provided in the constructor of the FEValues class. Given the surface description, in the codimension one case the two routines we need are the following:

      • FEValues::shape_grad(i,l), which returns $D \mathbf x_K(p_l) G^{-1}(p_l)D(\varphi_i \circ \mathbf x_K)$
      • FEValues::JxW(l), which returns $\sqrt{\det (G(p_l))} \ w_l$. This provides exactly the terms we need for our computations.

      On a more general note, details for the finite element approximation on surfaces can be found for instance in [Dziuk, in Partial differential equations and calculus of variations 1357, Lecture Notes in Math., 1988], [Demlow, SIAM J. Numer. Anal. 47(2), 2009] and [Bonito, Nochetto, and Pauletti, SIAM J. Numer. Anal. 48(5), 2010].

      Testcase


      We produce one test case for a 2d problem and another one for 3d:

      • In 2d, let's choose as domain a half circle. On this domain, we choose the function $u(\mathbf x)=-2x_1x_2$ as the solution. To compute the right hand side, we have to compute the surface Laplacian of the solution function. There are (at least) two ways to do that. The first one is to project away the normal derivative as described above using the natural extension of $u(\mathbf x)$ (still denoted by $u$) over $\mathbb R^d$, i.e. to compute

        \[
          -\Delta_\Gamma u = \Delta u - \mathbf n^T \ D^2 u \ \mathbf n - (\mathbf n \cdot \nabla u)\ \kappa,
        \]

        where $\kappa$ is the total curvature of $\Gamma$. Since we are on the unit circle, $\mathbf n=\mathbf x$ and $\kappa = 1$ so that

        \[
          -\Delta_\Gamma u = -8 x_1x_2.
        \]

        A somewhat simpler way, at least for the current case of a curve in two-dimensional space, is to note that we can map the interval $t \in [0,\pi]$ onto the domain $\Omega$ using the transformation $\mathbf x(t)= \left(\begin{array}{c} \cos t \\ \sin t \end{array}\right)$. At position $\mathbf x=\mathbf x(t)$, the value of the solution is then $u(\mathbf x(t)) = -2\cos t \sin t$. Taking into account that the transformation is length preserving, i.e. a segment of length $dt$ is mapped onto a piece of curve of exactly the same length, the tangential Laplacian then satisfies

        \begin{align*}
          \Delta_\Gamma u
          &= \frac{d^2}{dt^2}(-2\cos t \sin t)
          = -2 \frac{d}{dt}(-\sin^2 t + \cos^2 t)
          = -2 \frac{d}{dt} \cos 2t
          \\
          &= 4 \sin 2t
          \\
          &= 8 \sin t \cos t
          \\
          &= 8 x_1x_2,
        \end{align*}

        which is of course the same result as we had above.

      • In 3d, the domain is again half of the surface of the unit ball, i.e. a half sphere or dome. We choose $u(\mathbf x)=-2\sin(\pi x_1)\cos(\pi x_2)e^{x_3}$ as the solution. We can compute the right hand side of the equation, $f=-\Delta_\Gamma u$, in the same way as the method above (with $\kappa = 2$), yielding an awkward and lengthy expression. You can find the full expression in the source code.

      In the program, we will also compute the $H^1$ seminorm error of the solution. Since the solution function and its numerical approximation are only defined on the manifold, the obvious definition of this error functional is $| e |_{H^1(\Gamma)} = | \nabla_\Gamma e |_{L_2(\Gamma)} = \left( \int_\Gamma | \nabla_\Gamma (u-u_h) |^2 \right)^{1/2}$. This requires us to provide the tangential gradient $\nabla_\Gamma u$ to the function VectorTools::integrate_difference (first introduced in step-7), which we will do by implementing the function Solution::gradient in the program below.

      Implementation

      If you've read through step-4 and understand the discussion above of how solution and right hand side correspond to each other, you will be immediately familiar with this program as well. In fact, there are only two things that are of significance:


        LaplaceBeltramiProblem::assemble_system

        The following is the central function of this program, assembling the matrix that corresponds to the surface Laplacian (Laplace-Beltrami operator). Maybe surprisingly, it actually looks exactly the same as for the regular Laplace operator discussed in, for example, step-4. The key is that the FEValues::shape_grad() function does the magic: It returns the surface gradient $\nabla_K \phi_i(x_q)$ of the $i$th shape function at the $q$th quadrature point. The rest then does not need any changes either:

          template <int spacedim>
          void LaplaceBeltramiProblem<spacedim>::assemble_system()
          {
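            // What follows is a sketch of how the function might continue,
            // matching the description above. The member names follow the
            // step-4 pattern and are assumptions; the actual program differs
            // in details such as the quadrature degree.
            const unsigned int      dim = spacedim - 1;
            const QGauss<dim>       quadrature_formula(2 * fe.degree);
            FEValues<dim, spacedim> fe_values(mapping, fe, quadrature_formula,
                                              update_values | update_gradients |
                                              update_quadrature_points |
                                              update_JxW_values);
            FullMatrix<double> cell_matrix(fe.dofs_per_cell, fe.dofs_per_cell);
            for (const auto &cell : dof_handler.active_cell_iterators())
              {
                cell_matrix = 0;
                fe_values.reinit(cell);
                for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
                  for (unsigned int j = 0; j < fe.dofs_per_cell; ++j)
                    for (unsigned int q = 0; q < quadrature_formula.size(); ++q)
                      // shape_grad() here already returns the *surface*
                      // gradient, so the loop body is identical to the
                      // flat-domain case:
                      cell_matrix(i, j) += fe_values.shape_grad(i, q) *
                                           fe_values.shape_grad(j, q) *
                                           fe_values.JxW(q);
                // ... distribute local contributions to the global matrix
                // as usual.
              }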

        Note that the only essential addition is the line marked with asterisks. It is worth pointing out one other thing here, though: because we detach the manifold description from the surface mesh, whenever we use a mapping object in the rest of the program, it has no curved boundary description to go on any more. Rather, it will have to use the implicit FlatManifold class that is used on all parts of the domain not explicitly assigned a different manifold object. Consequently, whether we use MappingQ(2), MappingQ(15) or MappingQ1, each cell of our mesh will be mapped using a bilinear approximation.

        All these drawbacks aside, the resulting pictures are still pretty. The only other difference from what is in step-38 is that we changed the right hand side to $f(\mathbf x)=\sin x_3$ and the boundary values (through the Solution class) to $u(\mathbf x)|_{\partial\Omega}=\cos x_3$. Of course, we now no longer know the exact solution, so the computation of the error at the end of LaplaceBeltrami::run will yield a meaningless number.

        The plain program

        /* ------------------------------------------------------------------------
        *
        /usr/share/doc/packages/dealii/doxygen/deal.II/step_39.html differs (HTML document, UTF-8 Unicode text, with very long lines)

        The MeshWorker::loop() function separates what needs to be done for local integration from the loops over cells and faces. It does this by calling functions that integrate over a cell, a boundary face, or an interior face, letting them create the local contributions, and then in a separate step calling a function that moves these local contributions into the global objects. We will use this approach for computing the matrices, the right hand side, the error estimator, and the actual error computation in the functions below. For each of these operations, we provide a namespace that contains a set of functions for cell, boundary, and interior face contributions.

        All the information needed for these local integrations is provided by MeshWorker::DoFInfo<dim> and MeshWorker::IntegrationInfo<dim>. In each case, the functions' signatures are fixed: MeshWorker::loop() wants to call functions with a specific set of arguments, so the signature of the functions cannot be changed.

        The first namespace defining local integrators is responsible for assembling the global matrix as well as the level matrices. On each cell, we integrate the Dirichlet form as well as the Nitsche boundary conditions and the interior penalty fluxes between cells.
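        As a rough sketch of what such a namespace looks like (the argument types follow the fixed signatures described above; the concrete program may differ in details):

          namespace MatrixIntegrator
          {
            // Called by MeshWorker::loop() once per cell:
            template <int dim>
            void cell(MeshWorker::DoFInfo<dim>         &dinfo,
                      MeshWorker::IntegrationInfo<dim> &info);

            // ... once per boundary face:
            template <int dim>
            void boundary(MeshWorker::DoFInfo<dim>         &dinfo,
                          MeshWorker::IntegrationInfo<dim> &info);

            // ... and once per interior face, with one DoFInfo/IntegrationInfo
            // pair for each of the two adjacent cells:
            template <int dim>
            void face(MeshWorker::DoFInfo<dim>         &dinfo1,
                      MeshWorker::DoFInfo<dim>         &dinfo2,
                      MeshWorker::IntegrationInfo<dim> &info1,
                      MeshWorker::IntegrationInfo<dim> &info2);
          } // namespace MatrixIntegrator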

        The boundary and flux terms need a penalty parameter, which should be adjusted to the cell size and the polynomial degree. We compute it in two steps: First, we compute on each cell $K_i$ the value $P_i = p_i(p_i+1)/h_i$, where $p_i$ is the polynomial degree on cell $K_i$ and $h_i$ is the length of $K_i$ orthogonal to the current face. Second, if exactly one of the two cells adjacent to the face has children, its penalty is multiplied by two (to account for the fact that the mesh size $h_i$ there is only half that previously computed); it is possible that both adjacent cells are refined, in which case we are integrating over a non-active face and no adjustment is necessary. Finally, we return the average of the two penalty values.
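        As a small illustration of this two-step rule, here is a free-standing sketch; the inputs $h_i$, $p_i$ and the refinement flag are assumptions, whereas the actual program extracts these quantities from the MeshWorker info objects:

          double interior_penalty(const double h1, const unsigned int p1,
                                  const double h2, const unsigned int p2,
                                  const bool   cell2_has_children)
          {
            double P1 = p1 * (p1 + 1.) / h1; // step 1: per-cell value on K_1
            double P2 = p2 * (p2 + 1.) / h2; // ... and on K_2
            if (cell2_has_children)          // step 2: exactly one neighbor is
              P2 *= 2.;                      // refined, so h_2 is half as large
            return (P1 + P2) / 2.;           // finally, average the two values
          }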

          namespace MatrixIntegrator
          {
          template <int dim>
        /usr/share/doc/packages/dealii/doxygen/deal.II/step_4.html differs (HTML document, UTF-8 Unicode text, with very long lines)

        ... "but I don't actually know any such function with this name and these arguments."

        But back to the concrete case here: For this tutorial, we choose as right hand side the function $4(x^4+y^4)$ in 2d, or $4(x^4+y^4+z^4)$ in 3d. We could write this distinction using an if-statement on the space dimension, but here is a simple way that also allows us to use the same function in 1d (or in 4D, if you should desire to do so), by using a short loop. Fortunately, the compiler knows the size of the loop at compile time (remember that at the time when you define the template, the compiler doesn't know the value of dim, but when it later encounters a statement or declaration RightHandSide<2>, it will take the template, replace all occurrences of dim by 2 and compile the resulting function). In other words, at the time of compiling this function, the number of times the body will be executed is known, and the compiler can minimize the overhead needed for the loop; the result will be as fast as if we had used the formulas above right away.

        The last thing to note is that a Point<dim> denotes a point in dim-dimensional space, and its individual components (i.e. $x$, $y$, ... coordinates) can be accessed using the () operator (in fact, the [] operator will work just as well) with indices starting at zero as usual in C and C++.

          template <int dim>
          double RightHandSide<dim>::value(const Point<dim> &p,
          const unsigned int /*component*/) const
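          {
            // Sketch of the body described above: sum the contributions
            // 4*x_i^4 over all dim coordinates. Since dim is known at compile
            // time, the compiler can unroll this loop completely.
            double return_value = 0.0;
            for (unsigned int i = 0; i < dim; ++i)
              return_value += 4.0 * std::pow(p[i], 4.0);
            return return_value;
          }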
        ...
          right_hand_side.value(x_q) * // f(x_q)
          fe_values.JxW(q_index)); // dx
          }
        As a final remark to these loops: when we assemble the local contributions into cell_matrix(i,j), we multiply the gradients of shape functions $i$ and $j$ at quadrature point q_index and then multiply by the scalar weight JxW. This is what actually happens: fe_values.shape_grad(i,q_index) returns a dim dimensional vector, represented by a Tensor<1,dim> object, and the operator* that multiplies it with the result of fe_values.shape_grad(j,q_index) makes sure that the dim components of the two vectors are properly contracted, and the result is a scalar floating point number that is then multiplied with the weight. Internally, this operator* makes sure that this happens correctly for all dim components of the vectors, whether dim is 2, 3, or any other space dimension; from a user's perspective, this is not something worth bothering with, however, and it makes things a lot simpler if one wants to write code dimension independently.

        With the local systems assembled, the transfer into the global matrix and right hand side is done exactly as before, but here we have again merged some loops for efficiency:

          cell->get_dof_indices(local_dof_indices);
          for (const unsigned int i : fe_values.dof_indices())
        Note
        A final remark on visualization: the idea of visualization is to give insight, which is not the same as displaying information. In particular, it is easy to overload a picture with information, but while it shows more information it makes it also more difficult to glean insight. As an example, the program I used to generate these pictures, VisIt, by default puts tick marks on every axis, puts a big fat label "X Axis" on the $x$ axis and similar for the other axes, shows the file name from which the data was taken in the top left and the name of the user doing so and the time and date on the bottom right. None of this is important here: the axes are equally easy to make out because the tripod at the bottom left is still visible, and we know from the program that the domain is $[-1,1]^3$, so there is no need for tick marks. As a consequence, I have switched off all the extraneous stuff in the picture: the art of visualization is to reduce the picture to those parts that are important to see what one wants to see, but no more.

        Postprocessing: What to do with the solution?

        This tutorial – like most of the other programs – principally only shows how to numerically approximate the solution of a partial differential equation, and then how to visualize this solution graphically. But solving a PDE is of course not the goal in most practical applications (unless you are a numerical methods developer and the method is the goal): We generally want to solve a PDE because we want to extract information from it. Examples for what people are interested in from solutions include the following:

        • Let's say you solve the equations of elasticity (which we will do in step-8), then that's presumably because you want to know about the deformation of an elastic object under a given load. From an engineering perspective, what you then presumably want to learn is the degree of deformation of the object, say at a specific point; or you may want to know the maximum stress in order to determine whether the applied load exceeds the safe maximal stress the material can withstand.

        The point here is that from an engineering perspective, solving the PDE is only the first step. The second step is to evaluate the computed solution in order to extract relevant numbers that allow us to either optimize a design, or to make decisions. This second step is often called "postprocessing the solution".

        This program does not solve a solid or fluid mechanics problem, so we should try to illustrate postprocessing with something that makes sense in the context of the equation we solve here. The Poisson equation in two space dimensions is a model for the vertical deformation of a membrane that is clamped at the boundary and is subject to a vertical force. For this kind of situation, it makes sense to evaluate the average vertical displacement,

        \[
          \bar u_h = \frac{\int_\Omega u_h(\mathbf x) \, dx}{|\Omega|},
        \]

        where $|\Omega| = \int_\Omega 1 \, dx$ is the area of the domain. To compute $\bar u_h$, as usual we replace integrals over the domain by a sum of integrals over cells,

        \[
          \int_\Omega u_h(\mathbf x) \, dx
          =
          \sum_K \int_K u_h(\mathbf x) \, dx,
        \]

        and then integrals over cells are approximated by quadrature:

        \begin{align*}
          \int_\Omega u_h(\mathbf x) \, dx
          &=
          \sum_K \int_K u_h(\mathbf x) \, dx,
          \\
          &=
          \sum_K \sum_q u_h(\mathbf x_q^K) w_q^K,
        \end{align*}

        where $w_q^K$ is the weight of the $q$th quadrature point evaluated on cell $K$. All of this is as always provided by the FEValues class – the entry point for all integrals in deal.II.

        The actual implementation of this is straightforward once you know how to get the values of the solution $u$ at the quadrature points of a cell. This functionality is provided by FEValues::get_function_values(), a function that takes a global vector of nodal values as input and returns a vector of function values at the quadrature points of the current cell. Using this function, to see how it all works together you can place the following code snippet anywhere in the program after the solution has been computed (the output_results() function seems like a good place to also do postprocessing, for example):

        QGauss<dim> quadrature_formula(fe.degree + 1);
        FEValues<dim> fe_values(fe,
        quadrature_formula,
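        update_values | update_JxW_values);

        // Sketch of the part of the snippet elided by the diff, assuming the
        // usual names (dof_handler, solution) from this program: accumulate
        // both integrals cell by cell via quadrature.
        std::vector<double> solution_values(quadrature_formula.size());
        double integral_of_u   = 0;
        double volume_of_omega = 0;
        for (const auto &cell : dof_handler.active_cell_iterators())
          {
            fe_values.reinit(cell);
            fe_values.get_function_values(solution, solution_values);
            for (const unsigned int q : fe_values.quadrature_point_indices())
              {
                integral_of_u += solution_values[q] * fe_values.JxW(q);
                volume_of_omega += 1 * fe_values.JxW(q);
              }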
        }
        std::cout << " Mean value of u=" << integral_of_u / volume_of_omega
        << std::endl;
        In this code snippet, we also compute the volume (or, since we are currently thinking about a two-dimensional situation: the area) $|\Omega|$ by computing the integral $|\Omega| = \int_\Omega 1 \, dx$ in exactly the same way, via quadrature. (We could avoid having to compute $|\Omega|$ by hand here, using the fact that deal.II has a function for this: GridTools::volume(). That said, it is efficient to compute the two integrals concurrently in the same loop, and so that's what we do.)

        This program of course also solves the same Poisson equation in three space dimensions. In this situation, the Poisson equation is often used as a model for diffusion of either a physical species (say, of ink in a tank of water, or a pollutant in the air) or of energy (specifically, of thermal energy in a solid body). In that context, the quantity

        \[
          \Phi_h = \int_{\partial\Omega} \nabla u_h(\mathbf x) \cdot \mathbf n(\mathbf x) \; dx
        \]

        is the flux of this species or energy across the boundary. (In actual physical models, one would also have to multiply the right hand side by a diffusivity or conductivity constant, but let us ignore this here.) In much the same way as before, we compute such integrals by splitting it over integrals of faces of cells, and then applying quadrature:

        \begin{align*}
          \Phi_h
          &=
          \int_{\partial\Omega} \nabla u_h(\mathbf x) \cdot \mathbf n(\mathbf x) \; dx
          \\
          &=
          \sum_K
          \sum_{f \in \text{faces of } K, f\subset\partial\Omega}
          \int_f \nabla u_h(\mathbf x) \cdot \mathbf n(\mathbf x) \; dx
          \\
          &=
          \sum_K
          \sum_{f \in \text{faces of } K, f\subset\partial\Omega}
          \sum_q \nabla u_h(\mathbf x_q^f) \cdot \mathbf n(\mathbf x_q^f) w_q^f,
        \end{align*}

        where now $\mathbf x_q^f$ are the quadrature points located on face $f$, and $w_q^f$ are the weights associated with these faces. The second of the sum symbols loops over all faces of cell $K$, but restricted to those that are actually at the boundary.

        This all is easily implemented by the following code that replaces the use of the FEValues class (which is used for integrating over cells – i.e., domain integrals) by the FEFaceValues class (which is used for integrating over faces – i.e., boundary integrals):

        QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
        FEFaceValues<dim> fe_face_values(fe,
        face_quadrature_formula,
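        update_gradients | update_normal_vectors |
          update_JxW_values);

        // Sketch of the elided boundary loop, assuming the usual names
        // (dof_handler, solution) from this program: integrate the normal
        // derivative over all cell faces that lie on the boundary.
        std::vector<Tensor<1, dim>> solution_gradients(face_quadrature_formula.size());
        double flux = 0;
        for (const auto &cell : dof_handler.active_cell_iterators())
          for (const auto &face : cell->face_iterators())
            if (face->at_boundary())
              {
                fe_face_values.reinit(cell, face);
                fe_face_values.get_function_gradients(solution, solution_gradients);
                for (const unsigned int q : fe_face_values.quadrature_point_indices())
                  flux += solution_gradients[q] * fe_face_values.normal_vector(q) *
                          fe_face_values.JxW(q);
              }
        std::cout << "   Flux=" << flux << std::endl;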
        30 CG iterations needed to obtain convergence.
        Mean value of u=1.58058
        Flux=-8.29435
        This makes some sense: If you look, for example, at the 2d output above, the solution varies between values of 1 and 2, but with a larger part of the solution closer to one than two; so an average value of 1.33 for the mean value is reasonable. For the flux, recall that $\nabla u \cdot \mathbf n$ is the directional derivative in the normal direction – in other words, how the solution changes as we move from the interior of the domain towards the boundary. If you look at the 2d solution, you will realize that for most parts of the boundary, the solution decreases as we approach the boundary, so the normal derivative is negative – so if we integrate along the boundary, we should expect (and obtain!) a negative value.

        Possibilities for extensions

        There are many ways in which one can play with this program. The simpler ones include essentially all the possibilities already discussed in the Possibilities for extensions section in the documentation of step-3, except that you will have to think about whether something now also applies to the 3d case discussed in the current program.

        It is also worthwhile considering the postprocessing options discussed above. The documentation states two numbers (the mean value and the normal flux) for both the 2d and 3d cases. Can we trust these numbers? We have convinced ourselves that at least the mean value is reasonable, and that the sign of the flux is probably correct. But are these numbers accurate?

        A general rule is that we should never trust a number unless we have verified it in some way. From the theory of finite element methods, we know that as we make the mesh finer and finer, the numerical solution $u_h$ we compute here must converge to the exact solution $u$. As a consequence, we also expect that $\bar u_h \rightarrow \bar u$ and $\Phi_h \rightarrow \Phi$, but that does not mean that for any given mesh $\bar u_h$ or $\Phi_h$ are particularly accurate approximations.

        To test this kind of thing, we have already considered the convergence of a point value in step-3. We can do the same here by selecting how many times the mesh is globally refined in the make_grid() function of this program. For the mean value of the solution, we then get the following numbers:

        # of refinements   $\bar u_h$ in 2d   $\bar u_h$ in 3d
        4                  1.33303            1.58058
        ...

        I did not have the patience to run the last two values for the 3d case – one needs quite a fine mesh for this, with correspondingly long run times. But we can be reasonably assured that values around 1.33 (for the 2d case) and 1.58 (for the 3d case) are about right – and at least for engineering applications, three digits of accuracy are good enough.

        The situation looks very different for the flux. Here, we get results such as the following:

        # of refinements   $\Phi_h$ in 2d   $\Phi_h$ in 3d
        4                  -3.68956         -8.29435
        ...

        So this is not great. For the 2d case, we might infer that perhaps a value around -6.4 might be right if we just refine the mesh enough – though 11 refinements already leads to some 4,194,304 cells. In any case, the first number (the one shown in the beginning where we discussed postprocessing) was off by almost a factor of 2!

        For the 3d case, the last number shown was on a mesh with 2,097,152 cells; the next one would have had 8 times as many cells. In any case, the numbers mean that we can't even be sure that the first digit of that last number is correct! In other words, it was worth checking, or we would have just believed all of these numbers. In fact, that last column isn't even doing a particularly good job convincing us that the code might be correctly implemented.

        If you keep reading through the other tutorial programs, you will find many ways to make these sorts of computations more accurate and to come to believe that the flux actually does converge to its correct value. For example, we can dramatically increase the accuracy of the computation by using adaptive mesh refinement (step-6) near the boundary, and in particular by using higher polynomial degree finite elements (also step-6, but also step-7). Using the latter, using cubic elements (polynomial degree 3), we can actually compute the flux pretty accurately even in 3d: $\Phi_h=-19.0148$ with 4 global refinement steps, and $\Phi_h=-19.1533$ with 5 refinement steps. These numbers are already pretty close together and give us a reasonable idea of the first two correct digits of the "true" answer.

        Note
        We would be remiss not to also comment on the fact that there are good theoretical reasons why computing the flux accurately appears to be so much more difficult than the average value. This has to do with the fact that finite element theory provides us with the estimate $\|u-u_h\|_{L_2(\Omega)} \le C h^2 \|\nabla^2u\|_{L_2(\Omega)}$ when using the linear elements this program uses – that is, for every global mesh refinement, $h$ is reduced by a factor of two and the error goes down by a factor of 4. Now, the $L_2$ error is not equivalent to the error in the mean value, but the two are related: They are both integrals over the domain, using the value of the solution. We expect the mean value to converge no worse than the $L_2$ norm of the error. At the same time, theory also provides us with this estimate: $\|\nabla (u-u_h)\|_{L_2(\partial\Omega)} \le C h^{1/2} \|\nabla^2u\|_{L_2(\Omega)}$. The move from values to gradients reduces the convergence rates by one order, and the move from domain to boundary by another half order. Here, then, each refinement step reduces the error not by a factor of 4 any more, but only by a factor of $\sqrt{2} \approx 1.4$. It takes a lot of global refinement steps to reduce the error by, say, a factor of ten or a hundred, and this is reflected in the very slow convergence evidenced by the table. On the other hand, for cubic elements (i.e., polynomial degree 3), we would get $\|u-u_h\|_{L_2(\Omega)} \le C h^4 \|\nabla^4u\|_{L_2(\Omega)}$ and after reduction by 1.5 orders, we would still have $\|\nabla (u-u_h)\|_{L_2(\partial\Omega)} \le C h^{2+1/2} \|\nabla^4u\|_{L_2(\Omega)}$. This rate, ${\cal O}(h^{2.5})$, is still quite rapid, and it is perhaps not surprising that we get much better answers with these higher order elements. This also illustrates that when trying to approximate anything that relates to a gradient of the solution, using linear elements (polynomial degree one) is really not a good choice at all.

        In this very specific case, it turns out that we can actually compute the exact value of $\Phi$. This is because for the Poisson equation we compute the solution of here, $-\Delta u = f$, we can integrate over the domain, $-\int_\Omega \Delta u = \int_\Omega f$, and then use that $\Delta = \text{div}\;\text{grad}$; this allows us to use the divergence theorem followed by multiplying by minus one to find $\int_{\partial\Omega} \nabla u \cdot n = -\int_\Omega f$. The left hand side happens to be $\Phi$. For the specific right hand side $f(x_1,x_2)=4(x_1^4+x_2^4)$ we use in 2d, we then get $-\int_\Omega f = -\int_{-1}^{1} \int_{-1}^{1} 4(x_1^4+x_2^4) \; dx_2\; dx_1 = -16 \left[\int_{-1}^{1} x^4 \; dx\right] = -16\times\frac 25$, which has a numerical value of exactly -6.4 – right on with our guess above. In 3d, we can do the same and get that the exact value is $-\int_\Omega f = -\int_{-1}^{1} \int_{-1}^{1} \int_{-1}^{1} 4(x_1^4+x_2^4+x_3^4) \; dx_3 \; dx_2\; dx_1 = -48\times\frac 25=-19.2$. What we found with cubic elements is then quite close to this exact value. Of course, in practice we almost never have exact values to compare with: If we could compute something on a piece of paper, we wouldn't have to solve the PDE numerically. But these sorts of situations make for excellent test cases that help us verify that our numerical solver works correctly. In many other cases, the literature contains numbers where others have already computed an answer accurately using their own software, and these are also often useful to compare against in verifying the correctness of our codes.

    The plain program

    /* ------------------------------------------------------------------------
    *
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_40.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    A general overview of how this parallelization happens is described in the Parallel computing with multiple processors using distributed memory documentation topic. You should read it for a top-level overview before reading through the source code of this program. A concise discussion of many terms we will use in the program is also provided in the Distributed Computing paper. It is probably worthwhile reading it for background information on how things work internally in this program.

    Linear algebra

    step-17 and step-18 already used parallel linear algebra classes, but since the current program is the first one that really covers parallel computing, it is probably worth giving a broad overview of parallel linear algebra here as well.

    First, the general mantra mentioned above was that everything has to be distributed. It does not scale if one process (or in fact all processes) has to keep a complete triangulation or even a substantial share of it; it all only works if every one of the $N$ processes in the parallel universe keeps at most a small multiple of one $N$th of the triangulation. Similarly, each process can only hold a small multiple of one $N$th of each solution or right hand side vector, and of the system matrix.

    To this end, deal.II has acquired interfaces to a number of packages that provide these kind of distributed linear algebra data structures. More specifically, deal.II comes with a number of "sub-packages" that all provide vector, matrix, and linear solver classes that are typically named the same or very similarly, but live in different namespaces:

    • deal.II's own linear algebra classes. These are what we have been using in step-1 to step-6, for example, along with most of the other programs in the deal.II tutorial. These classes are all not parallel in the sense that they do not use MPI, cannot subdivide the data among processes, or work on data stored on processes that cannot access each other's memory directly. (On the other hand, many of these classes actually use multiple threads internally, to use the multiple processor cores available on today's laptops and work stations.) These classes reside in the top-level namespace dealii.
    • Interfaces to the PETSc library's implementations of linear algebra functionality. These are found in namespace PETScWrappers. PETSc is a library that has built a large collection of linear algebra, linear solvers, nonlinear solvers, time steppers, and other functionality that runs on some of the largest machines in the world in parallel, using MPI.

      For the current program, we need to use parallel linear algebra classes to represent the matrix and vectors. Both the PETScWrapper and TrilinosWrapper classes will fit the bill and, depending on whether deal.II was configured with one or the other (or both), the top of the program selects one or the other set of wrappers by putting the respective class names into a namespace LA.
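      This selection mechanism can be sketched as follows. The configuration macros DEAL_II_WITH_PETSC and DEAL_II_WITH_TRILINOS and the header below are real deal.II interfaces, but the preprocessor logic here is simplified relative to the actual program, which also honors a preference flag between the two:

        #include <deal.II/lac/generic_linear_algebra.h>

        namespace LA
        {
        #if defined(DEAL_II_WITH_PETSC)
          // Use the PETSc-backed matrix, vector, and solver classes ...
          using namespace dealii::LinearAlgebraPETSc;
        #elif defined(DEAL_II_WITH_TRILINOS)
          // ... or the Trilinos-backed equivalents if only those are available.
          using namespace dealii::LinearAlgebraTrilinos;
        #endif
        } // namespace LA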

      The testcase

      This program essentially re-solves what we already do in step-6, i.e. it solves the Laplace equation

      \begin{align*}
        -\Delta u &= f \qquad &&\text{in}\ \Omega=[0,1]^2, \\
        u &= 0 \qquad &&\text{on}\ \partial\Omega.
      \end{align*}

      The difference of course is now that we want to do so on a mesh that may have a billion cells, with a billion or so degrees of freedom. There is no doubt that doing so is completely silly for such a simple problem, but the point of a tutorial program is, after all, not to do something useful but to show how useful programs can be implemented using deal.II. Be that as it may, to make things at least a tiny bit interesting, we choose the right hand side as a discontinuous function,

\begin{align*}
   f(x,y)
   =
   \left\{
   \begin{array}{ll}
     1 & \text{if}\ y > \frac 12 + \frac 14 \sin(4\pi x), \\
     -1 & \text{otherwise},
   \end{array}
   \right.
\end{align*}

      so that the solution has a singularity along the sinusoidal line snaking its way through the domain. As a consequence, mesh refinement will be concentrated along this line. You can see this in the mesh picture shown below in the results section.

      Rather than continuing here and giving a long introduction, let us go straight to the program code. If you have read through step-6 and the Parallel computing with multiple processors using distributed memory documentation topic, most of the things that are going to happen should be familiar to you already. In fact, comparing the two programs you will notice that the additional effort necessary to make things work in parallel is almost insignificant: the two programs have about the same number of lines of code (though step-6 spends more space on dealing with coefficients and output). In either case, the comments below will only be on the things that set step-40 apart from step-6 and that aren't already covered in the Parallel computing with multiple processors using distributed memory documentation topic.

       

    The following, however, will be new or be used in new roles. Let's walk through them. The first of these will provide the tools of the Utilities::System namespace that we will use to query things like the number of processors associated with the current MPI universe, or the rank that the processor this job runs on has within this universe:

      #include <deal.II/base/utilities.h>
      #include <deal.II/base/conditional_ostream.h>
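    As a small usage sketch (the variable names are invented, and in current deal.II versions these query functions live in the Utilities::MPI namespace):

      const unsigned int n_processes =
        Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
      const unsigned int this_process =
        Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);

      // A stream that only produces output on process 0; everyone else's
      // output is silently discarded:
      ConditionalOStream pcout(std::cout, this_process == 0);
      pcout << "Running with " << n_processes << " MPI processes" << std::endl;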

    After these preliminaries, here is where it becomes more interesting. As mentioned in the Parallel computing with multiple processors using distributed memory topic, one of the fundamental truths of solving problems on large numbers of processors is that there is no way for any processor to store everything (e.g. information about all cells in the mesh, all degrees of freedom, or the values of all elements of the solution vector). Rather, every processor will own a few of each of these and, if necessary, may know about a few more, for example the ones that are located on cells adjacent to the ones this processor owns itself. We typically call the latter ghost cells, ghost nodes or ghost elements of a vector. The point of this discussion here is that we need to have a way to indicate which elements a particular processor owns or needs to know about. This is the realm of the IndexSet class: if there are a total of $N$ cells, degrees of freedom, or vector elements, associated with (non-negative) integral indices $[0,N)$, then both the set of elements the current processor owns as well as the (possibly larger) set of indices it needs to know about are subsets of the set $[0,N)$. IndexSet is a class that stores subsets of this set in an efficient format:

      #include <deal.II/base/index_set.h>
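    A small illustration of the interface (the numbers are arbitrary):

      // The universe of indices is [0,1000); this process owns a contiguous
      // range plus one isolated index:
      IndexSet locally_owned(1000);
      locally_owned.add_range(250, 500); // the half-open range [250,500)
      locally_owned.add_index(7);

      // Membership and size queries:
      const bool owns_300 = locally_owned.is_element(300); // true
      const auto n_owned  = locally_owned.n_elements();    // 251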

    The next header file provides the function SparsityTools::distribute_sparsity_pattern. The role of this function will be explained below.

      #include <deal.II/lac/sparsity_tools.h>

    The final two new header files are the following: the first provides the class parallel::distributed::Triangulation, which represents meshes distributed across a potentially very large number of processors, while the second provides the namespace parallel::distributed::GridRefinement that offers functions that can adaptively refine such distributed meshes:
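    These are the two include files in question, followed by a minimal sketch of creating such a mesh (the sketch is illustrative, not the tutorial's exact code):

      #include <deal.II/distributed/tria.h>
      #include <deal.II/distributed/grid_refinement.h>

      // Together with deal.II/grid/grid_generator.h, a distributed mesh of
      // which every process stores only its own share can then be built as:
      parallel::distributed::Triangulation<2> triangulation(MPI_COMM_WORLD);
      GridGenerator::hyper_cube(triangulation);
      triangulation.refine_global(5);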

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_41.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    The classical formulation of the obstacle problem possesses the following form:

\begin{align*}
 -\textrm{div}\ \sigma &\geq f & &\quad\text{in } \Omega,\\
 \sigma &= \nabla u & &\quad\text{in } \Omega,\\
 u(\mathbf x) &= 0 & &\quad\text{on }\partial\Omega,\\
 (-\Delta u - f)(u(\mathbf x) - g(\mathbf x)) &= 0 & &\quad\text{in } \Omega,\\
 u(\mathbf x) &\geq g(\mathbf x) & &\quad\text{in } \Omega,
\end{align*}

    where $u$ is a scalar valued function that denotes the vertical displacement of the membrane. The first equation is called the equilibrium condition with a force of areal density $f$. Here, we will consider this force to be gravity. The second one is known as Hooke's law, which says that the stresses $\sigma$ are proportional to the gradient of the displacements $u$ (the proportionality constant, often denoted by $E$, has been set to one here, without loss of generality; if it is constant, it can be put into the right hand side function). At the boundary we have zero Dirichlet conditions. Obviously, the first two equations can be combined to yield $-\Delta u \ge f$.

    Intuitively, gravity acts downward and so $f(\mathbf x)$ is a negative function (we choose $f=-10$ in this program). The first condition then means that the total force acting on the membrane is gravity plus something positive: namely the upward force that the obstacle exerts on the membrane at those places where the two of them are in contact. How big is this additional force? We don't know yet (and neither do we know "where" it actually acts) but it must be so that the membrane doesn't penetrate the obstacle.

    The fourth equality above together with the last inequality forms the obstacle condition which has to hold at every point of the whole domain. The latter of these two means that the membrane must be above the obstacle $g(\mathbf x)$ everywhere. The second to last equation, often called the "complementarity condition", says that where the membrane is not in contact with the obstacle (i.e., those $\mathbf x$ where $u(\mathbf x) - g(\mathbf x) \neq 0$), then $-\Delta u=f$ at these locations; in other words, no additional forces act there, as expected. On the other hand, where $u=g$ we can have $-\Delta u-f \neq 0$, i.e., there can be additional forces (though there don't have to be: it is possible for the membrane to just touch, not press against, the obstacle).

    Derivation of the variational inequality

    An obvious way to obtain the variational formulation of the obstacle problem is to consider the total potential energy:

\begin{equation*}
  E(u) \dealcoloneq \dfrac{1}{2}\int\limits_{\Omega} \nabla u \cdot \nabla u - \int\limits_{\Omega} fu,
\end{equation*}

    which we have to minimize over the convex set of admissible displacements

\begin{equation*}
  G \dealcoloneq \lbrace v\in H^1_0(\Omega): v\geq g \text{ a.e. in } \Omega\rbrace.
\end{equation*}

    This set takes care of the third and fifth conditions above (the boundary values and the complementarity condition).

    Consider now the minimizer $u\in G$ of $E$ and any other function $v\in G$. Then the function

    \begin{equation*}
  F(\varepsilon) \dealcoloneq E(u+\varepsilon(v-u)),\quad\varepsilon\in\left[0,1\right],
 \end{equation*}

    takes its minimum at $\varepsilon = 0$ (because $u$ is a minimizer of the energy functional $E(\cdot)$), so that $F'(0)\geq 0$ for any choice of $v$. Note that $u+\varepsilon(v-u) = (1-\varepsilon)u+\varepsilon v\in G$ because of the convexity of $G$. If we compute $F'(\varepsilon)\vert_{\varepsilon=0}$ it yields the variational formulation we are searching for:

    Find a function $u\in G$ with

    \begin{equation*}
  \left(\nabla u, \nabla(v-u)\right) \geq \left(f,v-u\right) \quad \forall v\in G.
 \end{equation*}

    This is the typical form of variational inequalities, where not just $v$ appears in the bilinear form but in fact $v-u$. The reason is this: if $u$ is not constrained, then we can find test functions $v$ in $G$ so that $v-u$ can have any sign. By choosing test functions $v_1,v_2$ so that $v_1-u = -(v_2-u)$ it follows that the inequality can only hold for both $v_1$ and $v_2$ if the two sides are in fact equal, i.e., we obtain a variational equality.

    On the other hand, if $u=g$ then $G$ only allows test functions $v$ so that in fact $v-u\ge 0$. This means that we can't test the equation with both $v-u$ and $-(v-u)$ as above, and so we can no longer conclude that the two sides are in fact equal. Thus, this mimics the way we have discussed the complementarity condition above.

    Formulation as a saddle point problem

    The variational inequality above is awkward to work with. We would therefore like to reformulate it as an equivalent saddle point problem. We introduce a Lagrange multiplier $\lambda$ and the convex cone $K\subset V'$, $V'$ dual space of $V$, $K \dealcoloneq \{\mu\in V': \langle\mu,v\rangle\geq 0,\quad \forall v\in V, v \le 0 \}$ of Lagrange multipliers, where $\langle\cdot,\cdot\rangle$ denotes the duality pairing between $V'$ and $V$. Intuitively, $K$ is the cone of all "non-positive functions", except that $K\subset V'$ and so also contains objects other than regular functions. This yields the saddle point formulation: Find $u\in V$ and $\lambda\in K$ such that

\begin{align*}
  a(u,v) + b(v,\lambda) &= f(v), &\quad &\forall v\in V,\\
  b(u,\mu - \lambda) &\leq \langle g,\mu - \lambda\rangle, &\quad&\forall \mu\in K,
\end{align*}

    with the bilinear forms

\begin{align*}
  a(u,v) \dealcoloneq \left(\nabla u, \nabla v\right),
  \qquad
  b(u,\mu) \dealcoloneq \langle u,\mu\rangle.
\end{align*}

    The existence and uniqueness of $(u,\lambda)\in V\times K$ of this saddle point problem has been stated in Glowinski, Lions and Trémolières: Numerical Analysis of Variational Inequalities, North-Holland, 1981.

    Active Set methods to solve the saddle point problem

    There are different methods to solve the variational inequality. As one possibility you can understand the saddle point problem as a convex quadratic program (QP) with inequality constraints.

    To get there, let us assume that we discretize both $u$ and $\lambda$ with the same finite element space, for example the usual $Q_k$ spaces. We would then get the equations

    \begin{eqnarray*}
  &A U + B\Lambda = F,&\\
  &[BU-G]_i \geq 0, \quad \Lambda_i \leq 0,\quad \Lambda_i[BU-G]_i = 0
 \qquad \forall i.&
 \end{eqnarray*}

    where $B$ is the mass matrix on the chosen finite element space and the indices $i$ above are for all degrees of freedom in the set $\cal S$ of degrees of freedom located in the interior of the domain (we have Dirichlet conditions on the perimeter). However, we can make our life simpler if we use a particular quadrature rule when assembling all terms that yield this mass matrix, namely a quadrature formula where quadrature points are only located at the interpolation points at which shape functions are defined; since all but one shape function are zero at these locations, we get a diagonal mass matrix with

    \begin{align*}
   B_{ii} = \int_\Omega \varphi_i(\mathbf x)^2\ \textrm{d}x,
   \qquad
   B_{ij}=0 \ \text{for } i\neq j.
 \end{align*}

    To define $G$ we use the same technique as for $B$. In other words, we define

\begin{align*}
   G_{i} = \int_\Omega g_h(x) \varphi_i(\mathbf x)\ \textrm{d}x,
\end{align*}

    where $g_h$ is a suitable approximation of the obstacle $g$. The conditions stated above then take the form

\begin{eqnarray*}
  &A U + B\Lambda = F,&\\
  &[BU-G]_i \geq 0, \quad \Lambda_i \leq 0,\quad \Lambda_i[BU-G]_i = 0
 \qquad \forall i\in{\cal S}.&
\end{eqnarray*}

    Now we define for each degree of freedom $i$ the function

\begin{equation*}
  C([BU]_i,\Lambda_i) \dealcoloneq -\Lambda_i + \min\lbrace 0, \Lambda_i + c([BU]_i - G_i) \rbrace,
\end{equation*}

    with some fixed constant $c>0$. The complementarity conditions above are then equivalent to the system of (nonlinear, non-differentiable) equations

\begin{equation*}
  C([BU]_i,\Lambda_i) = 0, \qquad \forall i\in{\cal S}.
\end{equation*}
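    To make the role of this nonlinear complementarity (NCP) function concrete, here is a hypothetical scalar helper; the function name and the default value of $c$ are made up for illustration:

      #include <algorithm>

      // C(., .) vanishes exactly when [BU-G]_i >= 0, Lambda_i <= 0, and
      // Lambda_i * [BU-G]_i = 0 all hold for this degree of freedom:
      double ncp_value(const double BU_i,     // [BU]_i
                       const double G_i,      // G_i
                       const double Lambda_i, // Lambda_i
                       const double c = 100.) // any fixed c > 0
      {
        return -Lambda_i + std::min(0., Lambda_i + c * (BU_i - G_i));
      }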

    The primal-dual active set strategy we will use here is an iterative scheme which is based on this condition to predict the next active and inactive sets $\mathcal{A}_k$ and $\mathcal{F}_k$ (that is, those complementary sets of indices $i$ for which $U_i$ is either equal to or not equal to the value of the obstacle $B^{-1}G$). For a more in depth treatment of this approach, see Hintermueller, Ito, Kunisch: The primal-dual active set strategy as a semismooth newton method, SIAM J. OPTIM., 2003, Vol. 13, No. 3, pp. 865-888.

    The primal-dual active set algorithm

    The algorithm for the primal-dual active set method works as follows (NOTE: $B = B^T$):

      1. Initialize the active and inactive sets $\mathcal{A}_k$ and $\mathcal{F}_k$ such that $\mathcal{S} = \mathcal{A}_k \cup \mathcal{F}_k$ and $\mathcal{A}_k \cap \mathcal{F}_k = \emptyset$, and set $k=1$.
      2. Find the primal-dual pair $(U^k, \Lambda^k)$ that satisfies $AU^k + B\Lambda^k = F$, $[BU^k]_i = G_i$ for all $i\in\mathcal{A}_k$, and $\Lambda^k_i = 0$ for all $i\in\mathcal{F}_k$.
      3. Define the new active and inactive sets by $\mathcal{A}_{k+1} \dealcoloneq \lbrace i\in{\cal S}: \Lambda^k_i + c([BU^k]_i - G_i) < 0\rbrace$ and $\mathcal{F}_{k+1} \dealcoloneq \lbrace i\in{\cal S}: \Lambda^k_i + c([BU^k]_i - G_i) \geq 0\rbrace$.
      4. If $\mathcal{A}_{k+1} = \mathcal{A}_k$, stop; otherwise set $k=k+1$ and go to step 2.
       

    The ObstacleProblem class template

    This class supplies all functions and variables needed to describe the obstacle problem. It is close to what we had to do in step-4, and so relatively simple. The only real new components are the update_solution_and_constraints function that computes the active set and a number of variables that are necessary to describe the original (unconstrained) form of the linear system (complete_system_matrix and complete_system_rhs) as well as the active set itself and the diagonal of the mass matrix $B$ used in scaling Lagrange multipliers in the active set formulation. The rest is as in step-4:

      template <int dim>
      class ObstacleProblem
      {
        // ... (member functions and variables as described above)
      };

    Right hand side, boundary values, and the obstacle

    In the following, we define classes that describe the right hand side function, the Dirichlet boundary values, and the height of the obstacle as a function of $\mathbf x$. In all three cases, we derive these classes from Function<dim>, although in the case of RightHandSide and Obstacle this is more out of convention than necessity since we never pass such objects to the library. In any case, the definition of the right hand side and boundary values classes is obvious given our choice of $f=-10$, $u|_{\partial\Omega}=0$:

      template <int dim>
      class RightHandSide : public Function<dim>
      {
        // ... (overrides Function<dim>::value() to return f = -10)
      };
    The only other thing to do here is to compute the factors in the $B$ matrix which is used to scale the residual. As discussed in the introduction, we'll use a little trick to make this mass matrix diagonal, and in the following then first compute all of this as a matrix and then extract the diagonal elements for later use:

      mass_matrix.reinit(dsp);
      assemble_mass_matrix_diagonal(mass_matrix);

    ObstacleProblem::assemble_mass_matrix_diagonal

    The next function is used in the computation of the diagonal mass matrix $B$ used to scale variables in the active set method. As discussed in the introduction, we get the mass matrix to be diagonal by choosing the trapezoidal rule for quadrature. Doing so we don't really need the triple loop over quadrature points, indices $i$ and indices $j$ any more and can, instead, just use a double loop. The rest of the function is obvious given what we have discussed in many of the previous tutorial programs.

    Note that at the time this function is called, the constraints object only contains boundary value constraints; we therefore do not have to pay attention in the last copy-local-to-global step to preserve the values of matrix entries that may later on be constrained by the active set.

    Note also that the trick with the trapezoidal rule only works if we have in fact $Q_1$ elements. For higher order elements, one would need to use a quadrature formula that has quadrature points at all the support points of the finite element. Constructing such a quadrature formula isn't really difficult, but not the point here, and so we simply assert at the top of the function that our implicit assumption about the finite element is in fact satisfied.
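    To make this concrete, here is a sketch of what such an assembly loop can look like for $Q_1$ elements, using the QTrapezoid quadrature rule (called QTrapez in older deal.II releases); the members fe, dof_handler, constraints, and mass_matrix are assumed to have their usual tutorial meanings, so this is an illustration rather than the program's verbatim code:

      const QTrapezoid<dim> quadrature_formula;
      FEValues<dim> fe_values(fe, quadrature_formula,
                              update_values | update_JxW_values);

      const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
      const unsigned int n_q_points    = quadrature_formula.size();

      FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
      std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

      for (const auto &cell : dof_handler.active_cell_iterators())
        {
          fe_values.reinit(cell);
          cell_matrix = 0;

          // With quadrature points at the support points, shape function i
          // is the only one nonzero at point i, so only diagonal entries
          // accumulate anything:
          for (unsigned int q = 0; q < n_q_points; ++q)
            for (unsigned int i = 0; i < dofs_per_cell; ++i)
              cell_matrix(i, i) += (fe_values.shape_value(i, q) *
                                    fe_values.shape_value(i, q) *
                                    fe_values.JxW(q));

          cell->get_dof_indices(local_dof_indices);
          constraints.distribute_local_to_global(cell_matrix,
                                                 local_dof_indices,
                                                 mass_matrix);
        }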

     

    ObstacleProblem::solve

    There is nothing to say really about the solve function. In the context of a Newton method, we are not typically interested in very high accuracy (why ask for a highly accurate solution of a linear problem that we know only gives us an approximation of the solution of the nonlinear problem), and so we use the ReductionControl class that stops iterations when either an absolute tolerance is reached (for which we choose $10^{-12}$) or when the residual is reduced by a certain factor (here, $10^{-3}$).
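    In sketch form, and with placeholder names for the matrix, vectors, and preconditioner, the setup described here looks like the following:

      // Stop either at an absolute residual of 1e-12 or once the initial
      // residual has been reduced by a factor of 1e-3, whichever comes first:
      ReductionControl reduction_control(1000, 1e-12, 1e-3);
      SolverCG<TrilinosWrappers::MPI::Vector> solver(reduction_control);
      solver.solve(system_matrix, solution, system_rhs, preconditioner);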

      template <int dim>
      void ObstacleProblem<dim>::solve()
      {
        // ... (ReductionControl-based solve as described above)
      }
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_42.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    Here, the first of these equations defines the relationship between strain $\varepsilon(\mathbf u)=\frac{1}{2}\left(\nabla \mathbf u + \nabla \mathbf u^T\right)$ and stress $\sigma$ via the fourth-order compliance tensor $A$; $\varepsilon^p$ provides the plastic component of the strain to ensure that the stress does not exceed the yield stress. We will only consider isotropic materials for which $A$ can be expressed in terms of the Lamé moduli $\lambda$ and $\mu$ or alternatively in terms of the bulk modulus $\kappa$ and $\mu$. The second equation is the force balance; we will here not consider any body forces and henceforth assume that $\mathbf f=0$. The complementarity condition in the third line implies that $\varepsilon^p=0$ if $\mathcal{F}(\sigma)< 0$ but that $\varepsilon^p$ may be a nonzero tensor if and only if $\mathcal{F}(\sigma) = 0$, and in particular that in this case $\varepsilon^p$ must point in the direction $\partial\mathcal{F}(\sigma)/\partial \sigma$. The inequality $\mathcal{F}(\sigma)\le 0$ is a statement of the fact that plastic materials can only support a finite amount of stress; in other words, they react with plastic deformations $\varepsilon^p$ if external forces would result in a stress $\sigma$ for which $\mathcal{F}(\sigma)> 0$. A typical form for this yield function is $\mathcal{F}(\sigma)=|\sigma^D|-\sigma_{\text{yield}}$ where $\tau^D = \tau - \dfrac{1}{3}tr(\tau)I$ is the deviatoric part of a tensor and $|\cdot|$ denotes the Frobenius norm.

    Further equations describe a fixed, zero displacement on $\Gamma_D$ and that on the surface $\Gamma_C=\partial\Omega\backslash\Gamma_D$ where contact may appear, the normal force $\sigma_n=\mathbf n \cdot (\sigma(\mathbf u)\mathbf n)$ exerted by the obstacle is inward (no "pull" by the obstacle on our body) and with zero tangential component $\mathbf \sigma_t= \sigma \mathbf n - \mathbf \sigma_n \mathbf n = \sigma \mathbf n - [\mathbf n \cdot(\sigma \mathbf n)]\mathbf n$. The last condition is again a complementarity condition that implies that on $\Gamma_C$, the normal force can only be nonzero if the body is in contact with the obstacle; the second part describes the impenetrability of the obstacle and the body. The last two equations are commonly referred to as the Signorini contact conditions.

    Most materials - especially metals - have the property that they show some hardening as a result of deformation. In other words, $\sigma_{\text{yield}}$ increases with deformation. In practice, it is not the elastic deformation that results in hardening, but the plastic component. There are different constitutive laws to describe those material behaviors. The simplest one is called linear isotropic hardening described by the flow function $\mathcal{F}(\sigma,\varepsilon^p) = \vert\sigma^D\vert - (\sigma_0 + \gamma^{\text{iso}} |\varepsilon^p|)$.

    A strict approach would keep the active set fixed while we iterate the Newton method to convergence (or maybe the other way around: find the final active set before moving on to the next Newton iteration). In practice, it turns out that it is sufficient to do only a single Newton step per active set iteration, and so we will iterate over them concurrently. We will also, every once in a while, refine the mesh.

    A Newton method for the plastic nonlinearity

    As mentioned, we will treat the nonlinearity of the operator $P_\Pi$ by applying a Newton method, despite the fact that the operator is not differentiable in the strict sense. However, it satisfies the conditions of slant differentiability and this turns out to be enough for Newton's method to work. The resulting method then goes by the name semi-smooth Newton method, which sounds impressive but is, in reality, just a Newton method applied to a semi-smooth function with an appropriately chosen "derivative".

    In the current case, we will run our iteration by solving in each iteration $i$ the following equation (still an inequality, but linearized):

\begin{align*}
  \left(I_{\Pi}\varepsilon(\tilde {\mathbf u}^{i}),
  \varepsilon(v) - \varepsilon(\tilde {\mathbf u}^{i})\right) \geq \dots
\end{align*}

    Here, the rank-4 tensor $I_\Pi=I_\Pi(\varepsilon({\mathbf u}^{i-1}))$ is the (slant) linearization of the stress-strain projector at the previous iterate; it can be written in terms of $\mu$, $\kappa$, $I$, and $\mathbb{I}$,

    where $I$ and $\mathbb{I}$ are the identity tensors of rank 2 and 4, respectively.

    Note that this problem corresponds to a linear elastic contact problem where $I_\Pi$ plays the role of the elasticity tensor $C=A^{-1}$. Indeed, if the material is not plastic at a point, then $I_\Pi=C$. However, at places where the material is plastic, $I_\Pi$ is a spatially varying function. In any case, the system we have to solve for the Newton iterate $\tilde {\mathbf u}^{i}$ gets us closer to the goal of rewriting our problem in a way that allows us to use well-known solvers and preconditioners for elliptic systems.

    As a final note about the Newton method let us mention that as is common with Newton methods we need to globalize it by controlling the step length. In other words, while the system above solves for $\tilde {\mathbf u}^{i}$, the final iterate will rather be

\begin{align*}
  {\mathbf u}^{i} = {\mathbf u}^{i-1} + \alpha^i\,(\tilde {\mathbf u}^{i} - {\mathbf u}^{i-1}),
\end{align*}

    with a step length $0<\alpha^i\le 1$ determined by a line search. If $\mathcal{A}_{i+1} = \mathcal{A}_k$ and $\left\| {\hat R}\left({\mathbf u}^{i}\right) \right\|_{\ell_2} < \delta$ then stop, else set $i=i+1$ and go to step (1). This step ensures that we only stop iterations if both the correct active set has been found and the plasticity has been iterated to sufficient accuracy.

    In step 3 of this algorithm, the matrix $B\in\mathbb{R}^{n\times m}$, $n>m$ describes the coupling of the bases for the displacements and Lagrange multiplier (contact forces) and it is not square in our situation since $\Lambda^k$ is only defined on $\Gamma_C$, i.e., the surface where contact may happen. As shown in the paper, we can choose $B$ to be a matrix that has only one entry per row (see also Hüeber, Wohlmuth: A primal-dual active set strategy for non-linear multibody contact problems, Comput. Methods Appl. Mech. Engrg. 194, 2005, pp. 3147-3166). The vector $G$ is defined by a suitable approximation $g_h$ of the gap $g$

\begin{gather*}
 G_p = \begin{cases}
 g_{h,p}, & \text{if}\quad p\in\mathcal{S},\\
 0, & \text{if}\quad p\notin\mathcal{S}.
 \end{cases}
\end{gather*}

    The ConstitutiveLaw class template

    This class provides an interface for a constitutive law, i.e., for the relationship between strain $\varepsilon(\mathbf u)$ and stress $\sigma$. In this example we are using an elastoplastic material behavior with linear, isotropic hardening. Such materials are characterized by Young's modulus $E$, Poisson's ratio $\nu$, the initial yield stress $\sigma_0$ and the isotropic hardening parameter $\gamma$. For $\gamma = 0$ we obtain perfect elastoplastic behavior.

    As explained in the paper that describes this program, the first Newton steps are solved with a completely elastic material model to avoid having to deal with both nonlinearities (plasticity and contact) at once. To this end, this class has a function set_sigma_0() that we use later on to simply set $\sigma_0$ to a very large value – essentially guaranteeing that the actual stress will not exceed it, and thereby producing an elastic material. When we are ready to use a plastic model, we set $\sigma_0$ back to its proper value, using the same function. As a result of this approach, we need to leave sigma_0 as the only non-const member variable of this class.

     

    The constructor of the ConstitutiveLaw class sets the required material parameters for our deformable body. Material parameters for elastic isotropic media can be defined in a variety of ways, such as the pair $E, \nu$ (elastic modulus and Poisson's ratio), using the Lamé parameters $\lambda,\mu$, or several other commonly used conventions. Here, the constructor takes a description of material parameters in the form of $E,\nu$, but since these turn out not to be the coefficients that appear in the equations of the plastic projector, we immediately convert them into the more suitable set $\kappa,\mu$ of bulk and shear moduli. In addition, the constructor takes $\sigma_0$ (the yield stress absent any plastic strain) and $\gamma$ (the hardening parameter) as arguments. In this constructor, we also compute the two principal components of the stress-strain relation and its linearization.

      template <int dim>
      ConstitutiveLaw<dim>::ConstitutiveLaw(double E,
                                            double nu,
                                            double sigma_0,
                                            double gamma)

    The BitmapFile and ChineseObstacle classes

    The following two classes describe the obstacle outlined in the introduction, i.e., the Chinese character. The first of the two, BitmapFile is responsible for reading in data from a picture file stored in pbm ascii format. This data will be bilinearly interpolated and thereby provides a function that describes the obstacle. (The code below shows how one can construct a function by interpolating between given data points. One could use the Functions::InterpolatedUniformGridData, introduced after this tutorial program was written, which does exactly what we want here, but it is instructive to see how to do it by hand.)

    The data which we read from the file will be stored in a double std::vector named obstacle_data. This vector forms the basis from which we will compute a piecewise bilinear function as a polynomial interpolation. The data we will read from a file consists of zeros (white) and ones (black).

    The hx,hy variables denote the spacing between pixels in $x$ and $y$ directions. nx,ny are the numbers of pixels in each of these directions. get_value() returns the value of the image at a given location, interpolated from the adjacent pixel values.
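    A self-contained sketch of the kind of bilinear interpolation get_value() performs; the function and parameter names here are invented, while the real class stores nx, ny, hx, hy, and obstacle_data as members:

      #include <algorithm>
      #include <vector>

      // Interpolate the pixel data bilinearly at the point (x, y):
      double interpolate_pixels(const std::vector<double> &data,
                                const unsigned int nx, const unsigned int ny,
                                const double hx, const double hy,
                                const double x, const double y)
      {
        // Pixel to the lower left of (x, y), clamped to stay in range:
        const unsigned int ix =
          std::min(static_cast<unsigned int>(x / hx), nx - 2);
        const unsigned int iy =
          std::min(static_cast<unsigned int>(y / hy), ny - 2);

        // Local coordinates within that pixel cell, in [0,1]:
        const double xi  = x / hx - ix;
        const double eta = y / hy - iy;

        // Standard bilinear combination of the four surrounding pixels:
        return ((1 - xi) * (1 - eta) * data[iy * nx + ix] +
                xi * (1 - eta) * data[iy * nx + ix + 1] +
                (1 - xi) * eta * data[(iy + 1) * nx + ix] +
                xi * eta * data[(iy + 1) * nx + ix + 1]);
      }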

      template <int dim>
      class BitmapFile
      {
        // ... (reads a pbm file and exposes get_value() as described above)
      };

    The next block of variables corresponds to the solution and the linear systems we need to form. In particular, this includes the Newton matrix and right hand side; the vector that corresponds to the residual (i.e., the Newton right hand side) but from which we have not eliminated the various constraints and that is used to determine which degrees of freedom need to be constrained in the next iteration; and a vector that corresponds to the diagonal of the $B$ matrix briefly mentioned in the introduction and discussed in the accompanying paper.

      TrilinosWrappers::SparseMatrix newton_matrix;
     
     

    PlasticityContactProblem::assemble_mass_matrix_diagonal

    The next helper function computes the (diagonal) mass matrix that is used to determine the active set of the active set method we use in the contact algorithm. This matrix is of mass matrix type, but unlike the standard mass matrix, we can make it diagonal (even in the case of higher order elements) by using a quadrature formula that has its quadrature points at exactly the same locations as the interpolation points for the finite element are located. We achieve this by using a QGaussLobatto quadrature formula here, along with initializing the finite element with a set of interpolation points derived from the same quadrature formula. The remainder of the function is relatively straightforward: we put the resulting matrix into the given argument; because we know the matrix is diagonal, it is sufficient to have a loop over only $i$ and not over $j$. Strictly speaking, we could even avoid multiplying the shape function's values at quadrature point q_point by itself because we know the shape value to be a vector with exactly one entry equal to one, which when dotted with itself yields one. Since this function is not time critical we add this term for clarity.
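    The essential pairing, shown here as a brief sketch (fe_degree is a placeholder for whatever polynomial degree the program uses):

      // Build the element on Gauss-Lobatto support points and integrate with
      // the matching Gauss-Lobatto rule, so that quadrature points and
      // support points coincide and the mass matrix comes out diagonal:
      const QGaussLobatto<1>   support_points_1d(fe_degree + 1);
      const FE_Q<dim>          fe(support_points_1d);
      const QGaussLobatto<dim> quadrature_formula(fe_degree + 1);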

      template <int dim>
      void PlasticityContactProblem<dim>::assemble_mass_matrix_diagonal(
        TrilinosWrappers::SparseMatrix &mass_matrix)

    Having computed the stress-strain tensor and its linearization, we can now put together the parts of the matrix and right hand side. In both, we need the linearized stress-strain tensor times the symmetric gradient of $\varphi_i$, i.e. the term $I_\Pi\varepsilon(\varphi_i)$, so we introduce an abbreviation of this term. Recall that the matrix corresponds to the bilinear form $A_{ij}=(I_\Pi\varepsilon(\varphi_i),\varepsilon(\varphi_j))$ in the notation of the accompanying publication, whereas the right hand side is $F_i=([I_\Pi-P_\Pi C]\varepsilon(\varphi_i),\varepsilon(\mathbf u))$ where $u$ is the current linearization point (typically the last solution). This might suggest that the right hand side will be zero if the material is completely elastic (where $I_\Pi=P_\Pi$) but this ignores the fact that the right hand side will also contain contributions from non-homogeneous constraints due to the contact.

    The code block that follows this adds contributions that are due to boundary forces, should there be any.

      const SymmetricTensor<2, dim> stress_phi_i =
        stress_strain_tensor_linearized *
        fe_values[displacement].symmetric_gradient(i, q_point);

    PlasticityContactProblem::solve_newton_system

    The last piece before we can discuss the actual Newton iteration on a single mesh is the solver for the linear systems. There are a couple of complications that slightly obscure the code, but mostly it is just setup then solve. Among the complications are:

    • For the hanging nodes we have to apply the AffineConstraints::set_zero function to newton_rhs. This is necessary if a hanging node with solution value $x_0$ has one neighbor with value $x_1$ which is in contact with the obstacle and one neighbor $x_2$ which is not in contact. Because the update for the former will be prescribed, the hanging node constraint will have an inhomogeneity and will look like $x_0 = x_1/2 + \text{gap}/2$. So the corresponding entries in the right-hand-side are non-zero with a meaningless value. These values we have to set to zero.
    • Like in step-40, we need to shuffle between vectors that do and do not have ghost elements when solving or using the solution.
    The rest of the function is similar to step-40 and step-41 except that we use a BiCGStab solver instead of CG. This is due to the fact that for very small hardening parameters $\gamma$, the linear system becomes almost semidefinite though still symmetric. BiCGStab appears to have an easier time with such linear systems.
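    A sketch with placeholder names (the tolerances here are illustrative, not the program's actual choices):

      SolverControl solver_control(newton_matrix.m(),
                                   1e-8 * newton_rhs.l2_norm());
      SolverBicgstab<TrilinosWrappers::MPI::Vector> solver(solver_control);
      solver.solve(newton_matrix, distributed_solution, newton_rhs,
                   preconditioner);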

      template <int dim>
      void PlasticityContactProblem<dim>::solve_newton_system()
      {
        // ... (BiCGStab solve as sketched above)
      }
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_43.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    Much inspiration for this program comes from step-31 but several of the techniques discussed here are original.

    Advection-dominated two-phase flow mathematical model

    We consider the flow of a two-phase immiscible, incompressible fluid. Capillary and gravity effects are neglected, and viscous effects are assumed dominant. The governing equations for such a flow, which are identical to those used in step-21, are

\begin{align*}
   \mathbf{u}_t &= - \mathbf{K} \lambda_t \left(S\right) \nabla p, \\
   \nabla \cdot \mathbf{u}_t &= q, \\
   \epsilon \frac{\partial S}{\partial t} + \nabla \cdot \left( \mathbf{u}_t  F\left( S \right) \right)&=0,
\end{align*}

    where $S$ is the saturation (volume fraction between zero and one) of the second (wetting) phase, $p$ is the pressure, $\mathbf{K}$ is the permeability tensor, $\lambda_t$ is the total mobility, $\epsilon$ is the porosity, $F$ is the fractional flow of the wetting phase, $q$ is the source term and $\mathbf{u}_t$ is the total velocity. The total mobility, fractional flow of the wetting phase and total velocity are respectively given by

\begin{align*}
   \lambda_t(S)&= \lambda_w + \lambda_{nw} = \frac{k_{rw}(S)}{\mu_w} + \frac{k_{rnw}(S)}{\mu_{nw}}, \\
   F(S) &= \frac{\lambda_w}{\lambda_t} = \frac{\lambda_w}{\lambda_w + \lambda_{nw}} = \frac{k_{rw}(S)/\mu_w}{k_{rw}(S)/\mu_w + k_{rnw}(S)/\mu_{nw}}, \\
   \mathbf{u}_t &= \mathbf{u}_w + \mathbf{u}_{nw} = -\lambda_t(S)\mathbf{K} \cdot \nabla p,
\end{align*}

    where subscripts $w, nw$ represent the wetting and non-wetting phases, respectively.

    For convenience, the porosity $\epsilon$ in the saturation equation, which can be considered a scaling factor for the time variable, is set to one. Following a commonly used prescription for the dependence of the relative permeabilities $k_{rw}$ and $k_{rnw}$ on saturation, we use

\begin{align*}
   k_{rw}  &= S^2, \qquad&\qquad
   k_{rnw} &= \left( 1-S \right)^2.
\end{align*}

    The porous media equations above are augmented by initial conditions for the saturation and boundary conditions for the pressure. Since saturation and the gradient of the pressure uniquely determine the velocity, no boundary conditions are necessary for the velocity. Since the flow equations do not contain time derivatives, initial conditions for the velocity and pressure variables are not required. The flow field separates the boundary into inflow or outflow parts. Specifically,

\[
   \mathbf{\Gamma}_{in}(t) = \left\{\mathbf{x} \in \partial \Omega:\mathbf{n} \cdot \mathbf{u}_t<0\right\},
\]

    and we arrive at a complete model by also imposing boundary values for the saturation variable on the inflow boundary $\mathbf{\Gamma}_{in}$.

    Adaptive operator splitting and time stepping

    As seen in step-21, solving the flow equations for velocity and pressure is the part of the program that takes far longer than the (explicit) updating step for the saturation variable once we know the flow variables. On the other hand, the pressure and velocity depend only weakly on saturation, so one may think about only solving for pressure and velocity every few time steps while updating the saturation in every step. If we can find a criterion for when the flow variables need to be updated, we call this splitting an "adaptive operator splitting" scheme.

    Here, we use the following a posteriori criterion to decide when to re-compute pressure and velocity variables (detailed derivations and descriptions can be found in [Chueh2013]):

\begin{align*}
   \theta(n,n_p)
   =
     \max_{\kappa\in{\mathbb T}}
     \left(
     \left\| \frac 1{\lambda_t\left(S^{(n)}\right)}
       - \frac 1{\lambda_t\left(S^{(n_p)}\right)} \right\|_{L^\infty(\kappa)}
     \left\|\|\mathbf{K}^{-1}\|_1\right\|_{L^\infty(\kappa)}
     \right).
\end{align*}

    where superscripts in parentheses denote the number of the saturation time step at which any quantity is defined and $n_p<n$ represents the last step where we actually computed the pressure and velocity. If $\theta(n,n_p)$ exceeds a certain threshold we re-compute the flow variables; otherwise, we skip this computation in time step $n$ and only move the saturation variable one time step forward.

    In short, the algorithm allows us to perform a number of saturation time steps of length $\Delta t_c^{(n)}=t^{(n)}_c-t^{(n-1)}_c$ until the criterion above tells us to re-compute velocity and pressure variables, leading to a macro time step of length

\[
   \Delta t_p^{(n)} = \sum_{i=n_p+1}^{n} \Delta t_c^{(i)}.
\]
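    Expressed as control flow, and with every function and variable name hypothetical, the splitting amounts to the following inside the time loop:

      // Re-solve the expensive pressure-velocity (Darcy) system only when
      // the a posteriori indicator exceeds a user-chosen threshold:
      const double theta_threshold = 5e-7; // assumed value, for illustration
      if (compute_theta(saturation, saturation_at_last_flow_solve) >
          theta_threshold)
        {
          solve_flow_system(); // recompute u_t and p (hypothetical helper)
          saturation_at_last_flow_solve = saturation;
        }
      advance_saturation_one_micro_step(); // cheap explicit update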

    We choose the length of (micro) steps subject to the Courant-Friedrichs-Lewy (CFL) restriction according to the criterion

\[
   \Delta t_c = \frac{\textrm{min}_{K}h_{K}}{7 \|\mathbf{u}_t\|_{L^{\infty}\left(\Omega\right)}},
\]

    which we have confirmed to be stable for the choice of finite element and time stepping scheme for the saturation equation discussed below ( $h_K$ denotes the diameter of cell $K$). The result is a scheme where neither micro nor macro time steps are of uniform length, and both are chosen adaptively.
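    In code, once the smallest cell diameter and the maximal velocity magnitude are known, the micro step is a one-line computation (both names assumed for illustration):

      // CFL-limited micro time step as in the formula above:
      const double dt_c = min_cell_diameter / (7.0 * max_velocity_magnitude);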

    Time discretization

    Using this time discretization, we obtain the following set of equations for each time step from the IMPES approach (see step-21):

\begin{align*}
   \mathbf{u}^{(n)}_t + \lambda_t\left(S^{(n-1)}\right) \mathbf{K} \nabla p^{(n)} =0, \\
   \nabla \cdot \mathbf{u}^{(n)}_t = q, \\
   \epsilon \left( \frac{S^{(n-1)}-S^{(n)}}{\Delta t^{(n)}_c} \right) + \mathbf{u}^{(n)}_t \cdot \nabla F\left(S^{(n-1)}\right) + F\left(S^{(n-1)}\right) \nabla \cdot \mathbf{u}^{(n)}_t =0.
\end{align*}

    Using the fact that $\nabla \cdot \mathbf{u}_t = q$, the time discrete saturation equation becomes

\begin{align*}
   &\epsilon \left( \frac{S^{(n)}-S^{(n-1)}}{\Delta t^{(n)}_c} \right) + \mathbf{u}^{(n)}_t \cdot \nabla F\left(S^{(n-1)}\right) + F\left(S^{(n-1)}\right)q=0.
\end{align*}

    Weak form, space discretization for the pressure-velocity part

    By multiplying the equations defining the total velocity $\mathbf u_t^{(n)}$ and the equation that expresses its divergence in terms of source terms, with test functions $\mathbf{v}$ and $w$ respectively and then integrating terms by parts as necessary, the weak form of the problem reads: Find $\mathbf u, p$ so that for all test functions $\mathbf{v}, w$ there holds

\begin{gather*}
   \left( \left( \mathbf{K} \lambda_t\left(S^{(n-1)}\right) \right)^{-1} \mathbf{u}^{(n)}_t, \mathbf{v}\right)_{\Omega} - \left(p^{(n)}, \nabla \cdot \mathbf{v}\right)_{\Omega} = -\left(p^{(n)}, \mathbf{n} \cdot \mathbf{v} \right)_{\partial \Omega}, \\
   - \left( \nabla \cdot \mathbf{u}^{(n)}_t,w\right)_{\Omega} = - \big(q,w\big)_{\Omega}.
\end{gather*}

    Here, $\mathbf{n}$ represents the unit outward normal vector to $\partial \Omega$, and the pressure $p^{(n)}$ can be prescribed weakly on the open part of the boundary $\partial \Omega$, whereas on those parts where a velocity is prescribed (for example impermeable boundaries with $\mathbf n \cdot \mathbf u=0$), the term disappears altogether because $\mathbf n \cdot \mathbf v=0$.

    We use continuous finite elements to discretize the velocity and pressure equations. Specifically, we use mixed finite elements to ensure high order approximation for both vector (e.g. a fluid velocity) and scalar variables (e.g. pressure) simultaneously. For saddle point problems, it is well established that the so-called Babuska-Brezzi or Ladyzhenskaya-Babuska-Brezzi (LBB) conditions [BrezziFortin], [Chen2005] need to be satisfied to ensure stability of the pressure-velocity system. These stability conditions are satisfied in the present work by using elements for velocity that are one order higher than for the pressure, i.e. $u_h \in Q^d_{p+1}$ and $p_h \in Q_p$, where $p=1$, $d$ is the space dimension, and $Q_s$ denotes the space of tensor product Lagrange polynomials of degree $s$ in each variable.
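    For $p=1$ this means $Q_2^d$ velocities paired with $Q_1$ pressures. A minimal sketch of how such an element pair can be declared in deal.II (the variable name is illustrative, and the program may organize its elements differently):

      #include <deal.II/fe/fe_q.h>
      #include <deal.II/fe/fe_system.h>

      using namespace dealii;

      // LBB-stable mixed element for p = 1: velocity one order higher than pressure.
      constexpr int dim = 2;
      const FESystem<dim> darcy_fe(FE_Q<dim>(2), dim, // u_h in Q_{p+1}^d = Q_2^d
                                   FE_Q<dim>(1), 1);  // p_h in Q_p = Q_1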

    Stabilization, weak form and space discretization for the saturation transport equation

    The chosen $Q_1$ elements for the saturation equation do not lead to a stable discretization without upwinding or other kinds of stabilization, and spurious oscillations will appear in the numerical solution. Adding an artificial diffusion term is one approach to eliminating these oscillations [Chen2005]. On the other hand, adding too much diffusion smears sharp fronts in the solution and suffers from grid-orientation difficulties [Chen2005]. To avoid these effects, we use the artificial diffusion term proposed by [GuermondPasquetti2008] and validated in [Chueh2013] and [KHB12], as well as in step-31.

    This method modifies the (discrete) weak form of the saturation equation to read

\begin{align*}
  \left(\epsilon \frac{\partial S_h}{\partial t},\sigma_h\right)
  -
  \left(\mathbf{u}_t  F\left( S_h \right),
    \nabla \sigma_h\right)
  +
  \left(\mathbf n \cdot \mathbf{u}_t \hat F\left( S_h \right),
    \sigma_h\right)_{\partial\Omega}
  +
  \left(\nu\left(S_h\right) \nabla S_h,
    \nabla \sigma_h\right)
  &=0
  \qquad
  \forall \sigma_h,
\end{align*}

    where $\nu$ is the artificial diffusion parameter and $\hat F$ is an appropriately chosen numerical flux on the boundary of the domain (we choose the obvious full upwind flux for this).

    Following [GuermondPasquetti2008] (and as detailed in [Chueh2013]), we use the parameter as a piecewise constant function set on each cell $K$ with the diameter $h_{K}$ as

\[
    \nu(S_h)|_{K} = \beta \| \mathbf{u}_t \max\{F'(S_h),1\} \|_{L^{\infty}(K)} \textrm{min} \left\{ h_{K},h^{\alpha}_{K} \frac{\|\textrm{Res}(S_h)\|_{L^{\infty}(K)}}{c(\mathbf{u}_t,S)} \right\}
\]
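    Read as code, this is a one-line min/scale computation per cell. The following sketch only illustrates the arithmetic; the local max norms and the global normalization constant are assumed precomputed, and the names are ours, not the program's:

      #include <algorithm>
      #include <cmath>

      // nu(S_h)|_K for one cell K. Assumed precomputed inputs:
      //   max_u_dF       = || u_t * max{F'(S_h), 1} ||_{L^inf(K)}
      //   max_residual   = || Res(S_h) ||_{L^inf(K)}
      //   global_scaling = c(u_t, S)
      double artificial_viscosity(const double beta,
                                  const double alpha,
                                  const double h_K,
                                  const double max_u_dF,
                                  const double max_residual,
                                  const double global_scaling)
      {
        return beta * max_u_dF *
               std::min(h_K, std::pow(h_K, alpha) * max_residual / global_scaling);
      }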

    where $\alpha$ is a stabilization exponent and $\beta$ is a dimensionless user-defined stabilization constant. Following [GuermondPasquetti2008] as well as the implementation in step-31, the velocity and saturation global normalization constant, $c(\mathbf{u}_t,S)$, and the residual $\textrm{Res}(S)$ are respectively given by

\[
    c(\mathbf{u}_t,S) = c_R \|\mathbf{u}_t \max\{F'(S),1\}\|_{L^{\infty}(\Omega)} \textrm{var}(S)^\alpha | \textrm{diam} (\Omega) |^{\alpha - 2}
\]

    and

\[
    \textrm{Res}(S) = \left( \epsilon \frac{\partial S}{\partial t} + \mathbf{u}_t \cdot \nabla F(S) + F(S)q \right) \cdot S^{\alpha - 1}
\]

    where $c_R$ is a second dimensionless user-defined constant, $\textrm{diam}(\Omega)$ is the diameter of the domain and $\textrm{var}(S) = \textrm{max}_{\Omega} S - \textrm{min}_{\Omega} S$ is the range of the present saturation values in the entire computational domain $\Omega$.

    This stabilization scheme has a number of advantages over simpler schemes such as finite volume (or discontinuous Galerkin) methods or streamline upwind Petrov Galerkin (SUPG) discretizations. In particular, the artificial diffusion term acts primarily in the vicinity of discontinuities since the residual is small in areas where the saturation is smooth. It therefore provides for a higher degree of accuracy. On the other hand, it is nonlinear since $\nu$ depends on the saturation $S$. We avoid this difficulty by treating all nonlinear terms explicitly, which leads to the following fully discrete problem at time step $n$:

\begin{align*}
    &\left( \epsilon S_h^{(n)},\sigma_h\right)_{\Omega} - \Delta t^{(n)}_c \Big(F\left(S_h^{(n-1)}\right)\mathbf{u}^{*}_t,\nabla\sigma_h\Big)_{\Omega} + \Delta t^{(n)}_c \Big(F\left(S_h^{(n-1)}\right)\left(\mathbf{n}\cdot\mathbf{u}^{*}_t\right),\sigma_h\Big)_{\partial\Omega} \nonumber \\
    & \quad = \left( \epsilon S_h^{(n-1)},\sigma_h\right)_{\Omega} - \Delta t^{(n)}_c \bigg(\nu\left(S_h^{(n-1)}\right)\nabla S_h^{(n-1)},\nabla\sigma_h\bigg)_{\Omega} \nonumber \\
    & \qquad + \Delta t^{(n)}_c \bigg(\mathbf{n}\cdot\nu\left(S_h^{(n-1)}\right)\nabla S^{(n-1)},\sigma_h\bigg)_{\partial\Omega}
\end{align*}

    where $\mathbf{u}_t^{*}$ is the velocity linearly extrapolated from $\mathbf{u}^{(n_p)}_t$ and $\mathbf{u}^{(n_{pp})}_t$ to the current time $t^{(n)}$ if $\theta<\theta^*$ while $\mathbf{u}_t^{*}$ is $\mathbf{u}^{(n_p)}_t$ if $\theta>\theta^*$. Consequently, the equation is linear in $S_h^{(n)}$ and all that is required is to solve with a mass matrix on the saturation space.
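    Written out, the extrapolation is the standard two-point linear formula (this is how we read the description above; the program's exact form may differ in details), with $t^{(n_p)}$ and $t^{(n_{pp})}$ the times of the two most recent pressure-velocity solves:

\begin{align*}
  \mathbf{u}^{*}_t = \mathbf{u}^{(n_p)}_t
  + \frac{t^{(n)}-t^{(n_p)}}{t^{(n_p)}-t^{(n_{pp})}}
    \left(\mathbf{u}^{(n_p)}_t - \mathbf{u}^{(n_{pp})}_t\right).
\end{align*}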

    Since the Dirichlet boundary conditions for saturation are only imposed on the inflow boundaries, the third term on the left hand side of the equation above needs to be split further into two parts:

\begin{align*}
  &\Delta t^{(n)}_c \Big(F\left(S_h^{(n-1)}\right)\left(\mathbf{n}\cdot\mathbf{u}^{(n)}_t\right),\sigma_h\Big)_{\partial\Omega} \nonumber \\
  &\qquad= \Delta t^{(n)}_c \Big(F\left(S^{(n-1)}_{(+)}\right)\left(\mathbf{n}\cdot\mathbf{u}^{(n)}_{t(+)}\right),\sigma_h\Big)_{\partial\Omega_{(+)}} + \Delta t^{(n)}_c \Big(F\left(S^{(n-1)}_{(-)}\right)\left(\mathbf{n}\cdot\mathbf{u}^{(n)}_{t(-)}\right),\sigma_h\Big)_{\partial\Omega_{(-)}}
\end{align*}

    where $\partial\Omega_{(-)} = \left\{\mathbf{x} \in \partial\Omega : \mathbf{n} \cdot \mathbf{u}_t<0\right\}$ and $\partial\Omega_{(+)} = \left\{\mathbf{x} \in \partial\Omega : \mathbf{n} \cdot \mathbf{u}_t>0\right\}$ represent inflow and outflow boundaries, respectively. We choose values using an upwind formulation, i.e. $S^{(n-1)}_{(+)}$ and $\mathbf{u}^{(n)}_{t(+)}$ correspond to the values taken from the present cell, while the values of $S^{(n-1)}_{(-)}$ and $\mathbf{u}^{(n)}_{t(-)}$ are those taken from the neighboring boundary $\partial\Omega_{(-)}$.
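    In code, the split reduces to the sign of $\mathbf n \cdot \mathbf u_t$ at each face quadrature point. A minimal sketch of the upwind choice (names are illustrative, not the program's actual assembly loop):

      #include <deal.II/base/tensor.h>

      using namespace dealii;

      // Upwind value of the saturation at a boundary quadrature point:
      // n . u_t >= 0 is outflow (use the value from the present cell),
      // n . u_t <  0 is inflow (use the prescribed boundary value).
      template <int dim>
      double upwind_saturation(const Tensor<1, dim> &n,
                               const Tensor<1, dim> &u_t,
                               const double          S_cell,
                               const double          S_inflow)
      {
        return (n * u_t >= 0.) ? S_cell : S_inflow;
      }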

    Adaptive mesh refinement

    Choosing meshes adaptively to resolve sharp saturation fronts is an essential ingredient to achieve efficiency in our algorithm. Here, we use the same shock-type refinement approach used in [Chueh2013] to select those cells that should be refined or coarsened. The refinement indicator for each cell $K$ of the triangulation is computed by

\[
    \eta_{K} = |\nabla S_h(\mathbf x_K)|
\]

    where $\nabla S_h(\mathbf x_K)$ is the gradient of the discrete saturation variable evaluated at the center $\mathbf x_K$ of cell $K$. This approach is analogous to ones frequently used in compressible flow problems, where density gradients are used to indicate refinement. That said, as we will discuss at the end of the results section, this turns out to not be a very useful criterion since it leads to refinement basically everywhere. We only show it here for illustrative purposes.
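    A sketch of how this indicator could be evaluated with a single midpoint quadrature point per cell, inside a program that already has dof_handler, triangulation, and saturation_solution objects (these names are assumptions, not necessarily the program's):

      // One gradient evaluation at the cell center x_K per cell.
      const QMidpoint<dim> midpoint_rule;
      FEValues<dim>        fe_values(dof_handler.get_fe(),
                                     midpoint_rule,
                                     update_gradients);
      std::vector<Tensor<1, dim>> grad_S(1);

      Vector<float> refinement_indicators(triangulation.n_active_cells());
      for (const auto &cell : dof_handler.active_cell_iterators())
        {
          fe_values.reinit(cell);
          fe_values.get_function_gradients(saturation_solution, grad_S);
          refinement_indicators(cell->active_cell_index()) = grad_S[0].norm();
        }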

    The linear system and its preconditioning

    Following the discretization of the governing equations discussed above, we obtain a linear system of equations in time step $(n)$ of the following form:

/usr/share/doc/packages/dealii/doxygen/deal.II/step_44.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_44.html	2024-11-15 06:44:30.571681776 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_44.html	2024-11-15 06:44:30.571681776 +0000

    Notation

    One can think of fourth-order tensors as linear operators mapping second-order tensors (matrices) onto themselves in much the same way as matrices map vectors onto vectors. There are various fourth-order unit tensors that will be required in the forthcoming presentation. The fourth-order unit tensors $\mathcal{I}$ and $\overline{\mathcal{I}}$ are defined by

\[
     \mathbf{A} = \mathcal{I}:\mathbf{A}
             \qquad \text{and} \qquad
     \mathbf{A}^T = \overline{\mathcal{I}}:\mathbf{A} \, .
\]

    Note $\mathcal{I} \neq \overline{\mathcal{I}}^T$. Furthermore, we define the symmetric and skew-symmetric fourth-order unit tensors by

\[
     \mathcal{S} \dealcoloneq \dfrac{1}{2}[\mathcal{I} + \overline{\mathcal{I}}]
             \qquad \text{and} \qquad
     \mathcal{W} \dealcoloneq \dfrac{1}{2}[\mathcal{I} - \overline{\mathcal{I}}] \, ,
\]

    such that

\[
     \dfrac{1}{2}[\mathbf{A} + \mathbf{A}^T] = \mathcal{S}:\mathbf{A}
             \qquad \text{and} \qquad
     \dfrac{1}{2}[\mathbf{A} - \mathbf{A}^T] = \mathcal{W}:\mathbf{A} \, .
\]

    The fourth-order SymmetricTensor returned by identity_tensor() is $\mathcal{S}$.
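    As a quick illustration (a sketch, not part of the tutorial itself), the defining property $\mathcal{S}:\mathbf{A}=\mathbf{A}$ for symmetric $\mathbf{A}$ can be checked directly with deal.II's tensor classes:

      #include <deal.II/base/symmetric_tensor.h>

      using namespace dealii;

      constexpr int dim = 3;

      // S : A for the fourth-order symmetric identity returns A itself.
      const SymmetricTensor<4, dim> S = identity_tensor<dim>();
      SymmetricTensor<2, dim>       A;
      A[0][0] = 1.;
      A[0][1] = 2.; // symmetric storage: also sets A[1][0]
      A[1][1] = 3.;
      const SymmetricTensor<2, dim> B = S * A; // double contraction, B == A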

    Kinematics

    Let the time domain be denoted $\mathbb{T} = [0,T_{\textrm{end}}]$, where $t \in \mathbb{T}$ and $T_{\textrm{end}}$ is the total problem duration. Consider a continuum body that occupies the reference configuration $\Omega_0$ at time $t=0$. Particles in the reference configuration are identified by the position vector $\mathbf{X}$. The configuration of the body at a later time $t>0$ is termed the current configuration, denoted $\Omega$, with particles identified by the vector $\mathbf{x}$. The nonlinear map between the reference and current configurations, denoted $\boldsymbol{\varphi}$, acts as follows:

\[
     \mathbf{x} = \boldsymbol{\varphi}(\mathbf{X},t) \, .
\]

    The material description of the displacement of a particle is defined by

\[
     \mathbf{U}(\mathbf{X},t) = \mathbf{x}(\mathbf{X},t) - \mathbf{X} \, .
\]

    The deformation gradient $\mathbf{F}$ is defined as the material gradient of the motion:

\[
     \mathbf{F}(\mathbf{X},t)
             \dealcoloneq \dfrac{\partial \boldsymbol{\varphi}(\mathbf{X},t)}{\partial \mathbf{X}}
             = \textrm{Grad}\ \mathbf{x}(\mathbf{X},t)
             = \mathbf{I} + \textrm{Grad}\ \mathbf{U} \, .
\]

    The determinant of the deformation gradient $J(\mathbf{X},t) \dealcoloneq \textrm{det}\ \mathbf{F}(\mathbf{X},t) > 0$ maps corresponding volume elements in the reference and current configurations, denoted $\textrm{d}V$ and $\textrm{d}v$, respectively, as

\[
     \textrm{d}v = J(\mathbf{X},t)\; \textrm{d}V \, .
\]

    Two important measures of the deformation in terms of the spatial and material coordinates are the left and right Cauchy-Green tensors, denoted $\mathbf{b} \dealcoloneq \mathbf{F}\mathbf{F}^T$ and $\mathbf{C} \dealcoloneq \mathbf{F}^T\mathbf{F}$, respectively. They are both symmetric and positive definite.

    The Green-Lagrange strain tensor is defined by

\[
     \mathbf{E} \dealcoloneq \frac{1}{2}[\mathbf{C} - \mathbf{I} ]
             = \underbrace{\frac{1}{2}[\textrm{Grad}^T \mathbf{U} +  \textrm{Grad}\mathbf{U}]}_{\boldsymbol{\varepsilon}}
                     + \frac{1}{2}[\textrm{Grad}^T\ \mathbf{U}][\textrm{Grad}\ \mathbf{U}] \, .
\]

    If the assumption of infinitesimal deformations is made, then the second term on the right can be neglected, and $\boldsymbol{\varepsilon}$ (the linearised strain tensor) is the only component of the strain tensor. This assumption is, looking at the setup of the problem, not valid in step-18, making the use of the linearized $\boldsymbol{\varepsilon}$ as the strain measure in that tutorial program questionable.
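    Most of the kinematic quantities introduced so far are available ready-made in deal.II's Physics::Elasticity::Kinematics namespace. A short sketch, assuming the material displacement gradient Grad U has already been extracted elsewhere (e.g. from an FEValues object):

      #include <deal.II/base/symmetric_tensor.h>
      #include <deal.II/base/tensor.h>
      #include <deal.II/physics/elasticity/kinematics.h>

      using namespace dealii;

      constexpr int dim = 3;

      Tensor<2, dim> Grad_u; // Grad U, assumed computed elsewhere

      const Tensor<2, dim> F =
        Physics::Elasticity::Kinematics::F(Grad_u); // F = I + Grad U
      const double J = determinant(F);              // dv = J dV
      const SymmetricTensor<2, dim> b =
        Physics::Elasticity::Kinematics::b(F);      // b = F F^T
      const SymmetricTensor<2, dim> C =
        Physics::Elasticity::Kinematics::C(F);      // C = F^T F
      const SymmetricTensor<2, dim> E =
        Physics::Elasticity::Kinematics::E(F);      // Green-Lagrange strain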

    In order to handle the different response that materials exhibit when subjected to bulk and shear type deformations we consider the following decomposition of the deformation gradient $\mathbf{F}$ and the left Cauchy-Green tensor $\mathbf{b}$ into volume-changing (volumetric) and volume-preserving (isochoric) parts:

\[
     \mathbf{F}
             = (J^{1/3}\mathbf{I})\overline{\mathbf{F}}
     \qquad \text{and} \qquad
     \mathbf{b}
     = (J^{2/3}\mathbf{I})\overline{\mathbf{F}}\,\overline{\mathbf{F}}^T
             =  (J^{2/3}\mathbf{I})\overline{\mathbf{b}} \, .
\]

    Clearly, $\textrm{det}\ \mathbf{F} = \textrm{det}\ (J^{1/3}\mathbf{I}) = J$, so that $\textrm{det}\ \overline{\mathbf{F}} = 1$, i.e. $\overline{\mathbf{F}}$ is indeed volume-preserving.

    The spatial velocity field is denoted $\mathbf{v}(\mathbf{x},t)$. The derivative of the spatial velocity field with respect to the spatial coordinates gives the spatial velocity gradient $\mathbf{l}(\mathbf{x},t)$, that is

\[
     \mathbf{l}(\mathbf{x},t)
             \dealcoloneq \dfrac{\partial \mathbf{v}(\mathbf{x},t)}{\partial \mathbf{x}}
             = \textrm{grad}\ \mathbf{v}(\mathbf{x},t) \, ,
\]

    where $\textrm{grad} \{\bullet \} = \frac{\partial \{ \bullet \} }{ \partial \mathbf{x}} = \frac{\partial \{ \bullet \} }{ \partial \mathbf{X}}\frac{\partial \mathbf{X} }{ \partial \mathbf{x}} = \textrm{Grad} \{ \bullet \} \mathbf{F}^{-1}$.

    Kinetics

    Cauchy's stress theorem equates the Cauchy traction $\mathbf{t}$ acting on an infinitesimal surface element in the current configuration $\mathrm{d}a$ to the product of the Cauchy stress tensor $\boldsymbol{\sigma}$ (a spatial quantity) and the outward unit normal to the surface $\mathbf{n}$ as

\[
     \mathbf{t}(\mathbf{x},t, \mathbf{n}) = \boldsymbol{\sigma}\mathbf{n} \, .
\]

    The Cauchy stress is symmetric. Similarly, the first Piola-Kirchhoff traction $\mathbf{T}$, which acts on an infinitesimal surface element in the reference configuration $\mathrm{d}A$, is the product of the first Piola-Kirchhoff stress tensor $\mathbf{P}$ (a two-point tensor) and the outward unit normal to the surface $\mathbf{N}$ as

\[
     \mathbf{T}(\mathbf{X},t, \mathbf{N}) = \mathbf{P}\mathbf{N} \, .
\]

    The Cauchy traction $\mathbf{t}$ and the first Piola-Kirchhoff traction $\mathbf{T}$ are related as

\[
     \mathbf{t}\mathrm{d}a = \mathbf{T}\mathrm{d}A \, .
\]

    This can be demonstrated using Nanson's formula.

    The first Piola-Kirchhoff stress tensor is related to the Cauchy stress as

\[
     \mathbf{P} = J \boldsymbol{\sigma}\mathbf{F}^{-T} \, .
\]

    Further important stress measures are the (spatial) Kirchhoff stress $\boldsymbol{\tau} = J \boldsymbol{\sigma}$ and the (referential) second Piola-Kirchhoff stress $\mathbf{S} = {\mathbf{F}}^{-1} \boldsymbol{\tau} {\mathbf{F}}^{-T}$.
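    These relations translate one-to-one into deal.II tensor arithmetic. A sketch, assuming an invertible $\mathbf F$ and a given Cauchy stress:

      #include <deal.II/base/tensor.h>

      using namespace dealii;

      template <int dim>
      void stress_measures(const Tensor<2, dim> &F,     // deformation gradient
                           const Tensor<2, dim> &sigma) // Cauchy stress
      {
        const double         J     = determinant(F);
        const Tensor<2, dim> F_inv = invert(F);
        const Tensor<2, dim> tau = J * sigma;                      // Kirchhoff
        const Tensor<2, dim> P   = J * sigma * transpose(F_inv);   // 1st Piola-Kirchhoff
        const Tensor<2, dim> S   = F_inv * tau * transpose(F_inv); // 2nd Piola-Kirchhoff
        (void)P;
        (void)S;
      }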

    Push-forward and pull-back operators

    Push-forward and pull-back operators allow one to transform various measures between the material and spatial settings. The stress measures used here are contravariant, while the strain measures are covariant.

    The push-forward and pull-back operations for second-order covariant tensors $(\bullet)^{\text{cov}}$ are respectively given by:

\[
     \chi_{*}(\bullet)^{\text{cov}} \dealcoloneq \mathbf{F}^{-T} (\bullet)^{\text{cov}} \mathbf{F}^{-1}
     \qquad \text{and} \qquad
     \chi^{-1}_{*}(\bullet)^{\text{cov}} \dealcoloneq \mathbf{F}^{T} (\bullet)^{\text{cov}} \mathbf{F} \, .
\]

    The push-forward and pull-back operations for second-order contravariant tensors $(\bullet)^{\text{con}}$ are respectively given by:

\[
     \chi_{*}(\bullet)^{\text{con}} \dealcoloneq \mathbf{F} (\bullet)^{\text{con}} \mathbf{F}^T
     \qquad \text{and} \qquad
     \chi^{-1}_{*}(\bullet)^{\text{con}} \dealcoloneq \mathbf{F}^{-1} (\bullet)^{\text{con}} \mathbf{F}^{-T} \, .
\]

    For example $\boldsymbol{\tau} = \chi_{*}(\mathbf{S})$.

    Hyperelastic materials

    A hyperelastic material response is governed by a Helmholtz free energy function $\Psi = \Psi(\mathbf{F}) = \Psi(\mathbf{C}) = \Psi(\mathbf{b})$ which serves as a potential for the stress. For example, if the Helmholtz free energy depends on the right Cauchy-Green tensor $\mathbf{C}$ then the isotropic hyperelastic response is

\[
     \mathbf{S}
             = 2 \dfrac{\partial \Psi(\mathbf{C})}{\partial \mathbf{C}} \, .
\]

    If the Helmholtz free energy depends on the left Cauchy-Green tensor $\mathbf{b}$ then the isotropic hyperelastic response is

\[
     \boldsymbol{\tau}
             = 2 \dfrac{\partial \Psi(\mathbf{b})}{\partial \mathbf{b}} \mathbf{b}
             =  2 \mathbf{b} \dfrac{\partial \Psi(\mathbf{b})}{\partial \mathbf{b}} \, .
\]

    Following the multiplicative decomposition of the deformation gradient, the Helmholtz free energy can be decomposed as

\[
     \Psi(\mathbf{b}) = \Psi_{\text{vol}}(J) + \Psi_{\text{iso}}(\overline{\mathbf{b}}) \, .
\]

    Similarly, the Kirchhoff stress can be decomposed into volumetric and isochoric parts as $\boldsymbol{\tau} = \boldsymbol{\tau}_{\text{vol}} + \boldsymbol{\tau}_{\text{iso}}$.
/usr/share/doc/packages/dealii/doxygen/deal.II/step_45.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_45.html	2024-11-15 06:44:30.623682241 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_45.html	2024-11-15 06:44:30.623682241 +0000
    void collect_periodic_faces(const MeshType &mesh, const types::boundary_id b_id1, const types::boundary_id b_id2, const unsigned int direction, std::vector< PeriodicFacePair< typename MeshType::cell_iterator > > &matched_pairs, const Tensor< 1, MeshType::space_dimension > &offset=::Tensor< 1, MeshType::space_dimension >(), const FullMatrix< double > &matrix=FullMatrix< double >())

    This call loops over all faces of the container dof_handler on the periodic boundaries with boundary indicator b_id1 and b_id2, respectively. (You can assign these boundary indicators by hand after creating the coarse mesh, see Boundary indicator. Alternatively, you can also let many of the functions in namespace GridGenerator do this for you if you specify the "colorize" flag; in that case, these functions will assign different boundary indicators to different parts of the boundary, with the details typically spelled out in the documentation of these functions.)

    Concretely, if $\text{vertices}_{1/2}$ are the vertices of two faces $\text{face}_{1/2}$, then the function call above will match pairs of faces (and dofs) such that the difference between $\text{vertices}_2$ and $matrix\cdot \text{vertices}_1+\text{offset}$ vanishes in every component apart from direction and stores the resulting pairs with associated data in matched_pairs. (See GridTools::orthogonal_equality() for detailed information about the matching process.)

    Consider, for example, the colored unit square $\Omega=[0,1]^2$ with boundary indicator 0 on the left, 1 on the right, 2 on the bottom and 3 on the top faces. (See the documentation of GridGenerator::hyper_cube() for this convention on how boundary indicators are assigned.) Then,

    GridTools::collect_periodic_faces(dof_handler,
                                      /*b_id1*/ 0,
                                      /*b_id2*/ 1,
                                      /*direction*/ 0,
                                      matched_pairs);

    would yield periodicity constraints such that $u(0,y)=u(1,y)$ for all $y\in[0,1]$.

    If we instead consider the parallelogram given by the convex hull of $(0,0)$, $(1,1)$, $(1,2)$, $(0,1)$ we can achieve the constraints $u(0,y)=u(1,y+1)$ by specifying an offset:

    GridTools::collect_periodic_faces(dof_handler,
                                      /*b_id1*/ 0,
                                      /*b_id2*/ 1,
                                      /*direction*/ 0,
                                      matched_pairs,
                                      Tensor<1, 2>({0., 1.}));

    Here, we need to specify the orientation of the two faces using face_orientation, face_flip and face_rotation. For a closer description have a look at the documentation of DoFTools::make_periodicity_constraints. The remaining parameters are the same as for the high level interface apart from the self-explanatory component_mask and affine_constraints.

    A practical example

    In the following, we show how to use the above functions in a more involved example. The task is to enforce rotated periodicity constraints for the velocity component of a Stokes flow.

    On a quarter-circle defined by $\Omega=\{{\bf x}\in(0,1)^2:\|{\bf x}\|\in (0.5,1)\}$ we are going to solve the Stokes problem

\begin{eqnarray*}
  -\Delta \; \textbf{u} + \nabla p &=& (\exp(-100\|{\bf x}-(.75,0.1)^T\|^2),0)^T, \\
  -\textrm{div}\;  \textbf{u}&=&0,\\
  \textbf{u}|_{\Gamma_1}&=&{\bf 0},
\end{eqnarray*}

    where the boundary $\Gamma_1$ is defined as $\Gamma_1 \dealcoloneq \{x\in \partial\Omega: \|x\|\in\{0.5,1\}\}$. For the remaining parts of the boundary we are going to use periodic boundary conditions, i.e.

\begin{align*}
  u_x(0,\nu)&=-u_y(\nu,0)&\nu&\in[0,1]\\
  u_y(0,\nu)&=u_x(\nu,0)&\nu&\in[0,1].
\end{align*}

    The mesh will be generated by GridGenerator::quarter_hyper_shell(), which also documents how it assigns boundary indicators to its various boundaries if its colorize argument is set to true.

    The commented program

    Before we can prescribe periodicity constraints, we need to ensure that cells on opposite sides of the domain but connected by periodic faces are part of the ghost layer if one of them is stored on the local processor. At this point we need to think about how we want to prescribe periodicity. The vertices $\text{vertices}_2$ of a face on the left boundary should be matched to the vertices $\text{vertices}_1$ of a face on the lower boundary given by $\text{vertices}_2=R\cdot \text{vertices}_1+b$ where the rotation matrix $R$ and the offset $b$ are given by

\begin{align*}
R=\begin{pmatrix}
0&1\\-1&0
\end{pmatrix},
\quad
b=\begin{pmatrix}0&0\end{pmatrix}.
\end{align*}

    The data structure we are saving the resulting information into is here based on the Triangulation.

      std::vector<GridTools::PeriodicFacePair<
        typename parallel::distributed::Triangulation<dim>::cell_iterator>>
        periodicity_vector;
    After we have provided the mesh with the necessary information for the periodicity constraints, we are now able to actually create them. For describing the matching we are using the same approach as before, i.e., the $\text{vertices}_2$ of a face on the left boundary should be matched to the vertices $\text{vertices}_1$ of a face on the lower boundary given by $\text{vertices}_2=R\cdot \text{vertices}_1+b$ where the rotation matrix $R$ and the offset $b$ are given by

\begin{align*}
R=\begin{pmatrix}
0&1\\-1&0
\end{pmatrix},
\quad
b=\begin{pmatrix}0&0\end{pmatrix}.
\end{align*}

    These two objects not only describe how faces should be matched but also in which sense the solution should be transformed from $\text{face}_2$ to $\text{face}_1$.

      FullMatrix<double> rotation_matrix(dim);
      rotation_matrix[0][1] = 1.;
      rotation_matrix[1][0] = -1.;
     
      Tensor<1, dim> offset;
     
    For setting up the constraints, we first store the periodicity information in an auxiliary object of type std::vector<GridTools::PeriodicFacePair<typename DoFHandler<dim>::cell_iterator>>. The periodic boundaries have the boundary indicators 2 (x=0) and 3 (y=0). All the other parameters we have set up before. In this case the direction does not matter. Due to $\text{vertices}_2=R\cdot \text{vertices}_1+b$ this is exactly what we want.

      std::vector<GridTools::PeriodicFacePair<typename DoFHandler<dim>::cell_iterator>>
        periodicity_vector;
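    A sketch of the two calls this information feeds into, under the assumption that constraints, fe, dof_handler, rotation_matrix and offset are the objects set up above (the exact boundary ids, direction, and call details in the program may differ):

      const FEValuesExtractors::Vector velocities(0);
      std::vector<unsigned int>        first_vector_components;
      first_vector_components.push_back(0);

      GridTools::collect_periodic_faces(dof_handler,
                                        /*b_id1*/ 2,
                                        /*b_id2*/ 3,
                                        /*direction*/ 1,
                                        periodicity_vector,
                                        offset,
                                        rotation_matrix);

      DoFTools::make_periodicity_constraints<dim, dim>(periodicity_vector,
                                                       constraints,
                                                       fe.component_mask(velocities),
                                                       first_vector_components);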
/usr/share/doc/packages/dealii/doxygen/deal.II/step_46.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_46.html	2024-11-15 06:44:30.691682848 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_46.html	2024-11-15 06:44:30.691682848 +0000

    Introduction

    This program deals with the problem of coupling different physics in different parts of the domain. Specifically, let us consider the following situation that couples a Stokes fluid with an elastic solid (these two problems were previously discussed separately in step-22 and step-8, where you may want to read up on the individual equations):

    • In a part $\Omega_f$ of $\Omega$, we have a fluid flowing that satisfies the time independent Stokes equations (in the form that involves the strain tensor):

  \begin{align*}
     -2\eta\nabla \cdot \varepsilon(\mathbf v) + \nabla p &= 0,
           \qquad \qquad && \text{in}\ \Omega_f\\
     -\nabla \cdot \mathbf v &= 0  && \text{in}\ \Omega_f.
  \end{align*}

      Here, $\mathbf v, p$ are the fluid velocity and pressure, respectively. We prescribe the velocity on part of the external boundary,

  \begin{align*}
     \mathbf v = \mathbf v_0 \qquad\qquad
      \text{on}\ \Gamma_{f,1} \subset \partial\Omega \cap \partial\Omega_f
  \end{align*}

      while we assume free-flow conditions on the remainder of the external boundary,

  \begin{align*}
     (2\eta \varepsilon(\mathbf v) - p \mathbf 1) \cdot \mathbf n = 0
      \qquad\qquad
      \text{on}\ \Gamma_{f,2} = \partial\Omega \cap \partial\Omega_f \backslash
      \Gamma_{f,1}.
  \end{align*}

    • The remainder of the domain, $\Omega_s = \Omega \backslash \Omega_f$, is occupied by a solid whose deformation field $\mathbf u$ satisfies the elasticity equation,

  \begin{align*}
     -\nabla \cdot C \varepsilon(\mathbf u) = 0 \qquad\qquad
     & \text{in}\ \Omega_s,
  \end{align*}

      where $C$ is the rank-4 elasticity tensor (for which we will use a particularly simple form by assuming that the solid is isotropic). It deforms in reaction to the forces exerted by the fluid flowing along the boundary of the solid. We assume this deformation to be so small that it has no feedback effect on the fluid, i.e. the coupling is only in one direction. For simplicity, we will assume that the solid's external boundary is clamped, i.e.

  \begin{align*}
     \mathbf u = \mathbf 0 \qquad\qquad
      \text{on}\ \Gamma_{s,1} = \partial\Omega \cap \partial\Omega_s
  \end{align*}

    • As a consequence of the small displacement assumption, we will pose the following boundary conditions on the interface between the fluid and solid: first, we have no slip boundary conditions for the fluid,

  \begin{align*}
     \mathbf v = \mathbf 0 \qquad\qquad
      \text{on}\ \Gamma_{i} = \partial\Omega_s \cap \partial\Omega_f.
  \end{align*}

      Secondly, the forces (traction) on the solid equal the normal stress from the fluid,

  \begin{align*}
     (C \varepsilon(\mathbf u)) \mathbf n =
     (2 \eta \varepsilon(\mathbf v) - p \mathbf 1) \mathbf n \qquad\qquad
      \text{on}\ \Gamma_{i} = \partial\Omega_s \cap \partial\Omega_f,
  \end{align*}

      where $\mathbf{n}$ is the normal vector on $\Gamma_{i}$ pointing from the solid to the fluid.
    We get a weak formulation of this problem by following our usual rule of multiplying from the left by a test function and integrating over the domain. It then looks like this: Find $y = \{\mathbf v, p, \mathbf u\} \in Y \subset H^1(\Omega_f)^d \times L_2(\Omega_f) \times H^1(\Omega_s)^d$ such that

\begin{align*}
     2 \eta (\varepsilon(\mathbf a), \varepsilon(\mathbf v))_{\Omega_f}
     - (\nabla \cdot \mathbf a, p)_{\Omega_f}
     - (q, \nabla \cdot \mathbf v)_{\Omega_f} &
     \\
     + (\varepsilon(\mathbf b), C \varepsilon(\mathbf u))_{\Omega_s} &
     \\
     - (\mathbf b,
        (2 \eta \varepsilon(\mathbf v) - p \mathbf 1) \mathbf n)_{\Gamma_i}
     &=
     0,
\end{align*}

    for all test functions $\mathbf a, q, \mathbf b$; the first, second, and third lines correspond to the fluid, solid, and interface contributions, respectively. Note that $Y$ is only a subspace of the spaces listed above to accommodate for the various Dirichlet boundary conditions.

    This sort of coupling is of course possible by simply having two Triangulation and two DoFHandler objects, one for each of the two subdomains. On the other hand, deal.II is much simpler to use if there is a single DoFHandler object that knows about the discretization of the entire problem.

    This program is about how this can be achieved. Note that the goal is not to present a particularly useful physical model (a realistic fluid-structure interaction model would have to take into account the finite deformation of the solid and the effect this has on the fluid): this is, after all, just a tutorial program intended to demonstrate techniques, not to solve actual problems. Furthermore, we will make the assumption that the interface between the subdomains is aligned with coarse mesh cell faces.

    The general idea

    Before going into more details let us state the obvious: this is a problem with multiple solution variables; for this, you will probably want to read the Handling vector valued problems documentation topic first, which presents the basic philosophical framework in which we address problems with more than one solution variable. But back to the problem at hand:

    The fundamental idea to implement this sort of problem in deal.II goes as follows: in the problem formulation, the velocity and pressure variables $\mathbf v, p$ only live in the fluid subdomain $\Omega_f$. But let's assume that we extend them by zero to the entire domain $\Omega$ (in the general case this means that they will be discontinuous along $\Gamma_i$). So what is the appropriate function space for these variables? We know that on $\Omega_f$ we should require $\mathbf v \in H^1(\Omega_f)^d, p \in L_2(\Omega_f)$, so for the extensions $\tilde{\mathbf v}, \tilde p$ to the whole domain the following appears a useful set of function spaces:

\begin{align*}
  \tilde {\mathbf v} &\in V
   = \{\tilde {\mathbf v}|_{\Omega_f} \in H^1(\Omega_f)^d, \quad
       \tilde {\mathbf v}|_{\Omega_s} = 0 \}
  \\
  \tilde p &\in P
  = \{\tilde p|_{\Omega_f} \in L_2(\Omega_f), \quad
       \tilde p|_{\Omega_s} = 0 \}.
\end{align*}

    (Since this is not important for the current discussion, we have omitted the question of boundary values from the choice of function spaces; this question also affects whether we can choose $L_2$ for the pressure or whether we have to choose the space $L_{2,0}(\Omega_f)=\{q\in L_2(\Omega_f): \int_{\Omega_f} q = 0\}$ for the pressure. None of these questions are relevant to the following discussion, however.)

    Note that these are indeed linear function spaces with obvious norm. Since no confusion is possible in practice, we will henceforth omit the tilde again to denote the extension of a function to the whole domain and simply refer by $\mathbf v, p$ to both the original and the extended function.

    For discretization, we need finite dimensional subspaces $V_h,P_h$ of $V, P$. For Stokes, we know from step-22 that an appropriate choice is $Q_{p+1}^d\times Q_p$ but this only holds for that part of the domain occupied by the fluid. For the extended field, let's use the following subspaces defined on the triangulation $\mathbb T$:

\begin{align*}
  V_h
   &= \{{\mathbf v}_h \quad | \quad
       \forall K \in {\mathbb T}:
       {\mathbf v}_h|_K \in Q_{p+1}^d\ \text{if}\ K\subset {\Omega_f}, \quad
       {\mathbf v}_h|_{\Omega_f}\ \text{is continuous}, \quad
       {\mathbf v}_h|_K = 0\ \text{if}\ K\subset {\Omega_s}\ \}
   && \subset V
  \\
  P_h
  &= \{ p_h \quad | \quad
       \forall K \in {\mathbb T}:
       p_h|_K \in Q_p\ \text{if}\ K\subset {\Omega_f}, \quad
       p_h|_{\Omega_f}\ \text{is continuous}, \quad
       p_h|_K = 0\ \text{if}\ K\subset {\Omega_s}\ \}
   && \subset P.
\end{align*}

    In other words, on $\Omega_f$ we choose the usual discrete spaces but we keep the (discontinuous) extension by zero. The point to make is that we now need a description of a finite element space for functions that are zero on a cell, and this is where the FE_Nothing class comes in: it describes a finite dimensional function space of functions that are constant zero. A particular property of this peculiar linear vector space is that it has no degrees of freedom: it isn't just finite dimensional, it is in fact zero dimensional, and consequently for objects of this type, FiniteElement::n_dofs_per_cell() will return zero. For discussion below, let us give this space a proper symbol:

\[
  Z = \{ \varphi: \varphi(x)=0 \}.
\]

    The symbol $Z$ reminds us of the fact that functions in this space are zero. Obviously, we choose $Z_h=Z$.

    This entire discussion above can be repeated for the variables we use to describe the elasticity equation. Here, for the extended variables, we have

\begin{align*}
  \tilde {\mathbf u} &\in U
   = \{\tilde {\mathbf u}|_{\Omega_s} \in H^1(\Omega_s)^d, \quad
       \tilde {\mathbf u}|_{\Omega_f} \in Z(\Omega_f)^d \},
\end{align*}

    and we will typically use a finite element space of the kind

\begin{align*}
  U_h
   &= \{{\mathbf u}_h \quad | \quad
       \forall K \in {\mathbb T}:
       {\mathbf u}_h|_K \in Q_r^d\ \text{if}\ K\subset {\Omega_s}, \quad
       {\mathbf u}_h|_{\Omega_s}\ \text{is continuous}, \quad
       {\mathbf u}_h|_K \in Z^d\ \text{if}\ K\subset {\Omega_f}\}
   && \subset U
\end{align*}

    of polynomial degree $r$.

    So to sum up, we are going to look for a discrete vector-valued solution $y_h = \{\mathbf v_h, p_h, \mathbf u_h\}$ in the following space:

\begin{align*}
  Y_h = \{
      & y_h = \{\mathbf v_h, p_h, \mathbf u_h\} : \\
      & y_h|_{\Omega_f} \in Q_{p+1}^d \times Q_p \times Z^d, \\
      & y_h|_{\Omega_s} \in Z^d \times Z \times Q_r^d \}.
\end{align*}

    Implementation

    So how do we implement this sort of thing? First, we realize that the discrete space $Y_h$ essentially calls for two different finite elements: on the fluid subdomain, we need the element $Q_{p+1}^d \times Q_p \times Z^d$, where FE_Nothing implements the space $Z$ of functions that are always zero; on the solid subdomain, we need the element $Z^d \times Z \times Q_r^d$. Both are readily built as FESystem objects in deal.II, as sketched below.
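    A sketch of the two element declarations the text alludes to (the degrees p and r and the variable names are illustrative, not necessarily the program's):

      FESystem<dim> stokes_fe(FE_Q<dim>(p + 1),  dim,  // fluid velocity
                              FE_Q<dim>(p),      1,    // fluid pressure
                              FE_Nothing<dim>(), dim); // displacement, extended by zero
      FESystem<dim> elasticity_fe(FE_Nothing<dim>(), dim,  // velocity, extended by zero
                                  FE_Nothing<dim>(), 1,    // pressure, extended by zero
                                  FE_Q<dim>(r),      dim); // solid displacement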

    The next step is that we associate each of these two elements with the cells that occupy each of the two subdomains. For this we realize that in a sense the two elements are just variations of each other in that they have the same number of vector components but have different polynomial degrees; this smells very much like what one would do in $hp$ finite element methods, and it is exactly what we are going to do here: we are going to (ab)use the classes and facilities of the hp-namespace to assign different elements to different cells. In other words, we will collect the two finite elements in an hp::FECollection, will integrate with an appropriate hp::QCollection using an hp::FEValues object, and our DoFHandler will be in hp-mode. You may wish to take a look at step-27 for an overview of all of these concepts.
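    A sketch of that hp machinery, assuming the two elements from above and an illustrative predicate cell_is_in_fluid_domain() that decides which subdomain a cell belongs to (the program makes this decision based on information stored on the cells):

      hp::FECollection<dim> fe_collection;
      fe_collection.push_back(stokes_fe);     // index 0: fluid cells
      fe_collection.push_back(elasticity_fe); // index 1: solid cells

      for (const auto &cell : dof_handler.active_cell_iterators())
        cell->set_active_fe_index(cell_is_in_fluid_domain(cell) ? 0 : 1);

      dof_handler.distribute_dofs(fe_collection);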

    Specifics of the implementation

    More specifically, in the program we have to address the following points:

    • Implementing the bilinear form, and in particular dealing with the interface term, both in the matrix and the sparsity pattern.
    • Implementing Dirichlet boundary conditions on the external and internal parts of the boundaries $\partial\Omega_f,\partial\Omega_s$.

    Dealing with the interface terms

    Let us first discuss implementing the bilinear form, which at the discrete level we recall to be

\begin{align*}
     2 \eta (\varepsilon(\mathbf a_h), \varepsilon(\mathbf v_h))_{\Omega_f}
     - (\nabla \cdot \mathbf a_h, p_h)_{\Omega_f}
     - (q_h, \nabla \cdot \mathbf v_h)_{\Omega_f} &
     \\
     + (\varepsilon(\mathbf b_h), C \varepsilon(\mathbf u_h))_{\Omega_s} &
     \\
     - (\mathbf b_h,
        (2 \eta \varepsilon(\mathbf v_h) - p_h \mathbf 1) \mathbf n)_{\Gamma_i}
     &=
     0.
\end{align*}
/usr/share/doc/packages/dealii/doxygen/deal.II/step_47.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_47.html	2024-11-15 06:44:30.755683420 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_47.html	2024-11-15 06:44:30.755683420 +0000
    The first author would like to acknowledge the support of NSF Grant No. DMS-1520862. Timo Heister and Wolfgang Bangerth acknowledge support through NSF awards DMS-1821210, EAR-1550901, and OAC-1835673.

    Introduction

    This program deals with the biharmonic equation,

\begin{align*}
  \Delta^2 u(\mathbf x) &= f(\mathbf x)
  \qquad \qquad &&\forall \mathbf x \in \Omega.
\end{align*}

    This equation appears in the modeling of thin structures such as roofs of stadiums. These objects are of course in reality three-dimensional with a large aspect ratio of lateral extent to perpendicular thickness, but one can often very accurately model these structures as two dimensional by making assumptions about how internal forces vary in the perpendicular direction. These assumptions lead to the equation above.

    The model typically comes in two different kinds, depending on what kinds of boundary conditions are imposed. The first case,

\begin{align*}
  u(\mathbf x) &= g(\mathbf x) \qquad \qquad
  &&\forall \mathbf x \in \partial\Omega, \\
  \Delta u(\mathbf x) &= h(\mathbf x) \qquad \qquad
  &&\forall \mathbf x \in \partial\Omega,
\end{align*}

    corresponds to the edges of the thin structure attached to the top of a wall of height $g(\mathbf x)$ in such a way that the bending forces that act on the structure are $h(\mathbf x)$; in most physical situations, one will have $h=0$, corresponding to the structure simply sitting atop the wall.

    In the second possible case of boundary values, one would have

\begin{align*}
  u(\mathbf x) &= g(\mathbf x) \qquad \qquad
  &&\forall \mathbf x \in \partial\Omega, \\
  \frac{\partial u(\mathbf x)}{\partial \mathbf n} &= j(\mathbf x) \qquad \qquad
  &&\forall \mathbf x \in \partial\Omega.
\end{align*}

    This corresponds to a "clamped" structure for which a nonzero $j(\mathbf x)$ implies a certain angle against the horizontal.

    As with Dirichlet and Neumann boundary conditions for the Laplace equation, it is of course possible to have one kind of boundary conditions on one part of the boundary, and the other on the remainder.

    What's the issue?

    The fundamental issue with the equation is that it takes four derivatives of the solution. In the case of the Laplace equation we treated in step-3, step-4, and several other tutorial programs, one multiplies by a test function, integrates, integrates by parts, and ends up with only one derivative on both the test function and trial function – something one can do with functions that are continuous globally, but may have kinks at the interfaces between cells: The derivative may not be defined at the interfaces, but that is on a lower-dimensional manifold (and so doesn't show up in the integrated value).

    But for the biharmonic equation, if one followed the same procedure using integrals over the entire domain (i.e., the union of all cells), one would end up with two derivatives on the test functions and trial functions each. If one were to use the usual piecewise polynomial functions with their kinks on cell interfaces, the first derivative would yield a discontinuous gradient, and the second derivative would yield delta functions on the interfaces – but because both the second derivatives of the test functions and of the trial functions yield a delta function, we would try to integrate the product of two delta functions. For example, in 1d, where $\varphi_i$ are the usual piecewise linear "hat functions", we would get integrals of the sort

\begin{align*}
   \int_0^L (\Delta \varphi_i) (\Delta \varphi_j)
   =
   \int_0^L
   \frac 1h \left[\delta(x-x_{i-1}) - 2\delta(x-x_i) + \delta(x-x_{i+1})\right]
   \frac 1h \left[\delta(x-x_{j-1}) - 2\delta(x-x_j) + \delta(x-x_{j+1})\right]
\end{align*}

    where $x_i$ is the node location at which the shape function $\varphi_i$ is defined, and $h$ is the mesh size (assumed uniform). The problem is that delta functions in integrals are defined using the relationship

\begin{align*}
   \int_0^L \delta(x-\hat x) f(x) \; dx
   =
   f(\hat x).
\end{align*}

    But that only works if (i) $f(\cdot)$ is actually well defined at $\hat x$, and (ii) if it is finite. On the other hand, an integral of the form

\begin{align*}
 \int_0^L \delta(x-x_i) \delta (x-x_i)
\end{align*}

    does not make sense. Similar reasoning can be applied for 2d and 3d situations.

    In other words: This approach of trying to integrate over the entire domain and then integrating by parts can't work.

    Historically, numerical analysts have tried to address this by inventing finite elements that are "C<sup>1</sup> continuous", i.e., that use shape functions that are not just continuous but also have continuous first derivatives. This is the realm of elements such as the Argyris element, the Clough-Tocher element and others, all developed in the late 1960s. From a twenty-first century perspective, they can only be described as bizarre in their construction. They are also exceedingly cumbersome to implement if one wants to use general meshes. As a consequence, they have largely fallen out of favor and deal.II currently does not contain implementations of these shape functions.

    What to do instead?

    So how does one approach solving such problems then? That depends a bit on the boundary conditions. If one has the first set of boundary conditions, i.e., if the equation is

\begin{align*}
  \Delta^2 u(\mathbf x) &= f(\mathbf x)
  \qquad \qquad &&\forall \mathbf x \in \Omega, \\
  u(\mathbf x) &= g(\mathbf x) \qquad \qquad
  &&\forall \mathbf x \in \partial\Omega, \\
  \Delta u(\mathbf x) &= h(\mathbf x) \qquad \qquad
  &&\forall \mathbf x \in \partial\Omega,
\end{align*}

    then the following trick works (at least if the domain is convex, see below): In the same way as we obtained the mixed Laplace equation of step-20 from the regular Laplace equation by introducing a second variable, we can here introduce a variable $v=\Delta u$ and can then replace the equations above by the following, "mixed" system:

\begin{align*}
  -\Delta u(\mathbf x) +v(\mathbf x) &= 0
  \qquad \qquad &&\forall \mathbf x \in \Omega, \\
  -\Delta v(\mathbf x) &= -f(\mathbf x)
  \qquad \qquad &&\forall \mathbf x \in \Omega, \\
  u(\mathbf x) &= g(\mathbf x) \qquad \qquad
  &&\forall \mathbf x \in \partial\Omega, \\
  v(\mathbf x) &= h(\mathbf x) \qquad \qquad
  &&\forall \mathbf x \in \partial\Omega.
\end{align*}

    In other words, we end up with what is in essence a system of two coupled Laplace equations for $u,v$, each with Dirichlet-type boundary conditions. We know how to solve such problems, and it should not be very difficult to construct good solvers and preconditioners for this system either using the techniques of step-20 or step-22. So this case is pretty simple to deal with.

    Note
    It is worth pointing out that this only works for domains whose boundary has corners if the domain is also convex – in other words, if there are no re-entrant corners. This sounds like a rather random condition, but it makes sense in view of the following two facts: The solution of the original biharmonic equation must satisfy $u\in H^2(\Omega)$. On the other hand, the mixed system reformulation above suggests that both $u$ and $v$ satisfy $u,v\in H^1(\Omega)$ because both variables only solve a Poisson equation. In other words, if we want to ensure that the solution $u$ of the mixed problem is also a solution of the original biharmonic equation, then we need to be able to somehow guarantee that the solution of $-\Delta u=v$ is in fact more smooth than just $H^1(\Omega)$. This can be argued as follows: For convex domains, "elliptic regularity" implies that if the right hand side $v\in H^s$, then $u\in H^{s+2}$ if the domain is convex and the boundary is smooth enough. (This could also be guaranteed if the domain boundary is sufficiently smooth – but domains whose boundaries have no corners are not very practical in real life.) We know that $v\in H^1$ because it solves the equation $-\Delta v=f$, but we are still left with the condition on convexity of the boundary; one can show that polygonal, convex domains are good enough to guarantee that $u\in H^2$ in this case (smoothly bounded, convex domains would result in $u\in H^3$, but we don't need this much regularity). On the other hand, if the domain is not convex, we cannot guarantee that the solution of the mixed system is in $H^2$, and consequently may obtain a solution that can't be equal to the solution of the original biharmonic equation.

    The more complicated situation is if we have the "clamped" boundary conditions, i.e., if the equation looks like this:

\begin{align*}
  \Delta^2 u(\mathbf x) &= f(\mathbf x)
  \qquad \qquad &&\forall \mathbf x \in \Omega, \\
  u(\mathbf x) &= g(\mathbf x) \qquad \qquad
  &&\forall \mathbf x \in \partial\Omega, \\
  \frac{\partial u(\mathbf x)}{\partial \mathbf n} &= j(\mathbf x) \qquad \qquad
  &&\forall \mathbf x \in \partial\Omega.
\end{align*}

    The same trick with the mixed system does not work here, because we would end up with both Dirichlet and Neumann boundary conditions for $u$, but none for $v$.

    The solution to this conundrum arrived with the Discontinuous Galerkin method wave in the 1990s and early 2000s: In much the same way as one can use discontinuous shape functions for the Laplace equation by penalizing the size of the discontinuity to obtain a scheme for an equation that has one derivative on each shape function, we can use a scheme that uses continuous (but not $C^1$ continuous) shape functions and penalize the jump in the derivative to obtain a scheme for an equation that has two derivatives on each shape function. In analogy to the Interior Penalty (IP) method for the Laplace equation, this scheme for the biharmonic equation is typically called the $C^0$ IP (or C0IP) method, since it uses $C^0$ (continuous but not continuously differentiable) shape functions with an interior penalty formulation.

    It is worth noting that the C0IP method is not the only one that has been developed for the biharmonic equation. step-82 shows an alternative method.

    Derivation of the C0IP method

    We base this program on the $C^0$ IP method presented by Susanne Brenner and Li-Yeng Sung in the paper "C$^0$ Interior Penalty Methods for Fourth Order Elliptic Boundary Value Problems on Polygonal Domains" [Brenner2005] where the method is derived for the biharmonic equation with "clamped" boundary conditions.

    As mentioned, this method relies on the use of $C^0$ Lagrange finite elements where the $C^1$ continuity requirement is relaxed and has been replaced with interior penalty techniques. To derive this method, we consider a $C^0$ shape function $v_h$ which vanishes on $\partial\Omega$. We introduce notation $ \mathbb{F} $ as the set of all faces of $\mathbb{T}$, $ \mathbb{F}^b $ as the set of boundary faces, and $ \mathbb{F}^i $ as the set of interior faces for use further down below. Since the higher order derivatives of $v_h$ have two values on each interface $e\in \mathbb{F}$ (shared by the two cells $K_{+},K_{-} \in \mathbb{T}$), we cope with this discontinuity by defining the following single-valued functions on $e$:

\begin{align*}
   \jump{\frac{\partial^k v_h}{\partial \mathbf n^k}}
   &=
   \frac{\partial^k v_h|_{K_+}}{\partial \mathbf n^k} \bigg |_e
   -
   \frac{\partial^k v_h|_{K_-}}{\partial \mathbf n^k} \bigg |_e,
   \\
   \average{\frac{\partial^k v_h}{\partial \mathbf n^k}}
   &=
   \frac{1}{2}
   \bigg( \frac{\partial^k v_h|_{K_+}}{\partial \mathbf n^k} \bigg |_e
   + \frac{\partial^k v_h|_{K_-}}{\partial \mathbf n^k} \bigg |_e \bigg )
\end{align*}

    for $k =1,2$ (i.e., for the gradient and the matrix of second derivatives), and where $\mathbf n$ denotes a unit vector normal to $e$ pointing from $K_+$ to $K_-$. In the literature, these functions are referred to as the "jump" and "average" operations, respectively.
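    As a quick illustration of these two operators: in 1d on a uniform mesh of cell size $h$, the hat function $\varphi_i$ has derivative $+1/h$ on the cell to the left of $x_i$ and $-1/h$ on the cell to the right. Taking $K_+$ to be the left cell, with $\mathbf n$ pointing to the right, the definitions above give

\begin{align*}
  \jump{\frac{\partial \varphi_i}{\partial \mathbf n}} \bigg|_{x_i}
  &= \frac{1}{h} - \left(-\frac{1}{h}\right) = \frac{2}{h},
  \\
  \average{\frac{\partial \varphi_i}{\partial \mathbf n}} \bigg|_{x_i}
  &= \frac{1}{2} \left(\frac{1}{h} + \left(-\frac{1}{h}\right)\right) = 0.
\end{align*}

    It is exactly this kind of jump in the normal derivative that the penalty terms further down will control.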

    To obtain the $C^0$ IP approximation $u_h$, we left multiply the biharmonic equation by $v_h$, and then integrate over $\Omega$. As explained above, we can't do the integration by parts on all of $\Omega$ with these shape functions, but we can do it on each cell individually since the shape functions are just polynomials on each cell. Consequently, we start by using the following integration-by-parts formula on each mesh cell $K \in {\mathbb{T}}$:

\begin{align*}
   \int_K v_h (\Delta^2 w_h)
   &= \int_K v_h (\nabla\cdot\nabla) (\Delta w_h)
   \\
   &= -\int_K \nabla v_h \cdot (\nabla \Delta w_h)
      +\int_{\partial K} v_h (\nabla \Delta w_h \cdot \mathbf n).
\end{align*}

    At this point, we have two options: We can integrate the domain term's $\nabla\Delta w_h$ one more time to obtain

\begin{align*}
   \int_K v_h (\Delta^2 w_h)
   &= \int_K (\Delta v_h) (\Delta w_h)
      +\int_{\partial K} v_h (\nabla \Delta w_h \cdot \mathbf n)
      -\int_{\partial K} (\nabla v_h \cdot \mathbf n) \Delta w_h.
\end{align*}

    For a variety of reasons, this turns out to be a variation that is not useful for our purposes.

    Instead, what we do is recognize that $\nabla\Delta w_h = \text{grad}\,(\text{div}\,\text{grad}\, w_h)$, and we can re-sort these operations as $\nabla\Delta w_h = \text{div}\,(\text{grad}\,\text{grad}\, w_h)$ where we typically write $\text{grad}\,\text{grad}\, w_h = D^2 w_h$ to indicate that this is the "Hessian" matrix of second derivatives. With this re-ordering, we can now integrate the divergence, rather than the gradient operator, and we get the following instead:

\begin{align*}
   \int_K v_h (\Delta^2 w_h)
   &= \int_K (\nabla \nabla v_h) : (\nabla \nabla w_h)
      +\int_{\partial K} v_h (\nabla \Delta w_h \cdot \mathbf n)
      -\int_{\partial K} (\nabla v_h) \cdot (\nabla \nabla w_h \, \mathbf n)
   \\
   &= \int_K (D^2 v_h) : (D^2 w_h)
      +\int_{\partial K} v_h (\nabla \Delta w_h \cdot \mathbf n)
      -\int_{\partial K} (\nabla v_h) \cdot (D^2 w_h \mathbf n).
\end{align*}

    Here, the colon indicates a double-contraction over the indices of the matrices to its left and right, i.e., the scalar product between two tensors. The outer product of two vectors $a \otimes b$ yields the matrix $(a \otimes b)_{ij} = a_i b_j$.

    Then, we sum over all cells $K \in  \mathbb{T}$, and take into account that this means that every interior face appears twice in the sum. If we therefore split everything into a sum of integrals over cell interiors and a separate sum over cell interfaces, we can use the jump and average operators defined above. There are two steps left: First, because our shape functions are continuous, the gradients of the shape functions may be discontinuous, but the continuity guarantees that really only the normal component of the gradient is discontinuous across faces whereas the tangential component(s) are continuous. Second, the discrete formulation that results is not stable as the mesh size goes to zero, and to obtain a stable formulation that converges to the correct solution, we need to add the following terms:

\begin{align*}
 -\sum_{e \in \mathbb{F}} \int_{e}
   \average{\frac{\partial^2 v_h}{\partial \mathbf n^2}}
   \jump{\frac{\partial u_h}{\partial \mathbf n}}
  -\sum_{e \in \mathbb{F}} \int_{e}
   \average{\frac{\partial^2 u_h}{\partial \mathbf n^2}}
   \jump{\frac{\partial v_h}{\partial \mathbf n}}
   +\sum_{e \in \mathbb{F}}
   \frac{\gamma}{h_e}\int_e
   \jump{\frac{\partial v_h}{\partial \mathbf n}}
   \jump{\frac{\partial u_h}{\partial \mathbf n}}.
\end{align*}

    Then, after making cancellations that arise, we arrive at the following C0IP formulation of the biharmonic equation: find $u_h$ such that $u_h = g$ on $\partial \Omega$ and
/usr/share/doc/packages/dealii/doxygen/deal.II/step_48.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_48.html	2024-11-15 06:44:30.807683884 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_48.html	2024-11-15 06:44:30.807683884 +0000
@@ -147,24 +147,24 @@
 This program was contributed by Katharina Kormann and Martin Kronbichler.

 The algorithm for the matrix-vector product is based on the article "A generic interface for parallel cell-based finite element operator application" by Martin Kronbichler and Katharina Kormann, Computers and Fluids 63:135–147, 2012, and the paper "Parallel finite element operator application: Graph partitioning and coloring" by Katharina Kormann and Martin Kronbichler in: Proceedings of the 7th IEEE International Conference on e-Science, 2011.

    Introduction

    This program demonstrates how to use the cell-based implementation of finite element operators with the MatrixFree class, first introduced in step-37, to solve nonlinear partial differential equations. Moreover, we have another look at the handling of constraints within the matrix-free framework. Finally, we will use an explicit time-stepping method to solve the problem and introduce Gauss-Lobatto finite elements that are very convenient in this case since their mass matrix can be accurately approximated by a diagonal, and thus trivially invertible, matrix. The two ingredients to this property are firstly a distribution of the nodal points of Lagrange polynomials according to the point distribution of the Gauss-Lobatto quadrature rule. Secondly, the quadrature is done with the same Gauss-Lobatto quadrature rule. In this formula, the integrals $\int_K \varphi_i \varphi_j \, dx \approx \sum_q \varphi_i \varphi_j \,\mathrm{det}(J) \big |_{x_q}$ become zero whenever $i\neq j$, because exactly one function $\varphi_j$ is one and all others zero at the points defining the Lagrange polynomials. Moreover, the Gauss-Lobatto distribution of nodes of Lagrange polynomials clusters the nodes towards the element boundaries. This results in a well-conditioned polynomial basis for high-order discretization methods. Indeed, the condition number of an FE_Q element with equidistant nodes grows exponentially with the degree, which destroys any benefit for orders of about five and higher. For this reason, Gauss-Lobatto points are the default distribution for the FE_Q element (but at degrees one and two, those are equivalent to the equidistant points).
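    To see the diagonal mass matrix property in isolation, here is a minimal, self-contained sketch (not code from this tutorial; all names are chosen for illustration) that assembles the cell mass matrix of an FE_Q element with the matching Gauss-Lobatto quadrature and checks that the off-diagonal entries vanish:

  #include <deal.II/base/quadrature_lib.h>
  #include <deal.II/dofs/dof_handler.h>
  #include <deal.II/fe/fe_q.h>
  #include <deal.II/fe/fe_values.h>
  #include <deal.II/grid/grid_generator.h>
  #include <deal.II/grid/tria.h>

  #include <cmath>
  #include <iostream>

  using namespace dealii;

  int main()
  {
    constexpr unsigned int dim    = 2;
    constexpr unsigned int degree = 4;

    Triangulation<dim> triangulation;
    GridGenerator::hyper_cube(triangulation);

    const FE_Q<dim> fe(degree); // Gauss-Lobatto support points by default
    DoFHandler<dim> dof_handler(triangulation);
    dof_handler.distribute_dofs(fe);

    // Integrate with the Gauss-Lobatto rule whose points coincide with the
    // element's node points:
    const QGaussLobatto<dim> quadrature(degree + 1);
    FEValues<dim> fe_values(fe, quadrature, update_values | update_JxW_values);
    fe_values.reinit(dof_handler.begin_active());

    // At each quadrature point exactly one shape function is nonzero, so all
    // i != j entries of the cell mass matrix vanish:
    for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
      for (unsigned int j = 0; j < fe.n_dofs_per_cell(); ++j)
        {
          double m_ij = 0;
          for (unsigned int q = 0; q < quadrature.size(); ++q)
            m_ij += fe_values.shape_value(i, q) * fe_values.shape_value(j, q) *
                    fe_values.JxW(q);
          if (i != j && std::abs(m_ij) > 1e-12)
            std::cout << "unexpected off-diagonal entry M(" << i << ',' << j
                      << ") = " << m_ij << '\n';
        }
  }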

    Problem statement and discretization

    As an example, we choose to solve the sine-Gordon soliton equation

\begin{eqnarray*}
 u_{tt} &=& \Delta u -\sin(u) \quad\mbox{for}\quad (x,t) \in
 \Omega \times (t_0,t_f],\\
 {\mathbf n} \cdot \nabla u &=& 0
 \quad\mbox{for}\quad (x,t) \in \partial\Omega \times (t_0,t_f],\\
 u(x,t_0) &=& u_0(x).
\end{eqnarray*}

    that was already introduced in step-25. As a simple explicit time integration method, we choose the leapfrog scheme using the second-order formulation of the equation. With this time stepping, the scheme reads in weak form

\begin{eqnarray*}
 (v,u^{n+1}) = (v,2 u^n-u^{n-1} -
 (\Delta t)^2 \sin(u^n)) - (\nabla v, (\Delta t)^2 \nabla u^n),
\end{eqnarray*}

    where $v$ denotes a test function and the index $n$ stands for the time step number.

    For the spatial discretization, we choose FE_Q elements with basis functions defined to interpolate the support points of the Gauss-Lobatto quadrature rule. Moreover, when we compute the integrals over the basis functions to form the mass matrix and the operator on the right hand side of the equation above, we use the Gauss-Lobatto quadrature rule with the same support points as the node points of the finite element to evaluate the integrals. Since the finite element is Lagrangian, this will yield a diagonal mass matrix on the left hand side of the equation, making the solution of the linear system in each time step trivial.

    @@ -172,19 +172,19 @@

    Apart from the fact that we avoid solving linear systems with this type of elements when using explicit time-stepping, they come with two other advantages. When we are using the sum-factorization approach to evaluate the finite element operator (cf. step-37), we have to evaluate the function at the quadrature points. In the case of Gauss-Lobatto elements, where quadrature points and node points of the finite element coincide, this operation is trivial since the value of the function at the quadrature points is given by its one-dimensional coefficients. In this way, the arithmetic work for the finite element operator evaluation is reduced by approximately a factor of two compared to the generic Gaussian quadrature.

    To sum up the discussion, by using the right finite element and quadrature rule combination, we end up with a scheme where we only need to compute the right hand side vector corresponding to the formulation above and then multiply it by the inverse of the diagonal mass matrix in each time step. In practice, of course, we extract the diagonal elements and invert them only once at the beginning of the program.
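    In code, one such time step then reduces to a componentwise product. The following is a hypothetical sketch; the vector names are illustrative, not the tutorial's own:

  #include <deal.II/lac/vector.h>

  using namespace dealii;

  // One leapfrog step: u^{n+1} = M^{-1} rhs, where the diagonal mass matrix
  // has been inverted once ahead of time, and 'rhs' has already been filled
  // with (v, 2u^n - u^{n-1} - dt^2 sin(u^n)) - (grad v, dt^2 grad u^n).
  void leapfrog_update(const Vector<double> &inv_mass_diagonal,
                       const Vector<double> &rhs,
                       Vector<double>       &u_new)
  {
    for (unsigned int i = 0; i < rhs.size(); ++i)
      u_new(i) = inv_mass_diagonal(i) * rhs(i);
  }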

    Implementation of constraints

    The usual way to handle constraints in deal.II is to use the AffineConstraints class that builds a sparse matrix storing information about which degrees of freedom (DoF) are constrained and how they are constrained. This format uses an unnecessarily large amount of memory since there are not so many different types of constraints: for example, in the case of hanging nodes when using linear finite elements on every cell, most constraints have the form $x_k = \frac 12 x_i + \frac 12 x_j$ where the coefficients $\frac 12$ are always the same and only $i,j,k$ are different. While storing this redundant information is not a problem in general because it is only needed once during matrix and right hand side assembly, it becomes a bottleneck in the matrix-free approach since there this information has to be accessed every time we apply the operator, and the remaining components of the operator evaluation are so fast. Thus, instead of an AffineConstraints object, MatrixFree uses a variable that we call constraint_pool that collects the weights of the different constraints. Then, only an identifier of each constraint in the mesh instead of all the weights has to be stored. Moreover, the constraints are not applied in a pre- and postprocessing step but rather as we evaluate the finite element operator. Therefore, the constraint information is embedded into the variable indices_local_to_global that is used to extract the cell information from the global vector. If a DoF is constrained, the indices_local_to_global variable contains the global indices of the DoFs that it is constrained to. Then, we have another variable constraint_indicator at hand that holds, for each cell, the local indices of DoFs that are constrained as well as the identifier of the type of constraint. Fortunately, you will not see these data structures in the example program since the class FEEvaluation takes care of the constraints without user interaction.

    In the presence of hanging nodes, the diagonal mass matrix obtained on the element level via the Gauss-Lobatto quadrature/node point procedure does not directly translate to a diagonal global mass matrix, as following the constraints on rows and columns would also add off-diagonal entries. As explained in Kormann (2016), interpolating constraints on a vector, which maintains the diagonal shape of the mass matrix, is consistent with the equations up to an error of the same magnitude as the quadrature error. In the program below, we will simply assemble the diagonal of the mass matrix as if it were a vector to enable this approximation.

    Parallelization

    The MatrixFree class comes with the option to be parallelized on three levels: MPI parallelization on clusters of distributed nodes, thread parallelization scheduled by the Threading Building Blocks library, and finally with a vectorization by working on a batch of two (or more) cells via SIMD data type (sometimes called cross-element or external vectorization). As we have already discussed in step-37, you will get the best performance by using an instruction set specific to your system, e.g. with the cmake variable -DCMAKE_CXX_FLAGS="-march=native". The MPI parallelization was already exploited in step-37. Here, we additionally consider thread parallelization with TBB. This is fairly simple, as all we need to do is to tell the initialization of the MatrixFree object that we want to use a thread parallel scheme through the variable MatrixFree::AdditionalData::thread_parallel_scheme. During setup, a dependency graph is set up similar to the one described in the workstream_paper, which allows it to schedule the work of the local_apply function on chunks of cells without several threads accessing the same vector indices. As opposed to the WorkStream loops, some additional clever tricks to avoid global synchronizations as described in Kormann and Kronbichler (2011) are also applied.
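    A sketch of this setup follows. Note that the AdditionalData member is spelled tasks_parallel_scheme in recent deal.II releases; that spelling, and the reinit() overload used here, are assumptions on my part since the exact interface has varied between versions:

  #include <deal.II/base/quadrature_lib.h>
  #include <deal.II/dofs/dof_handler.h>
  #include <deal.II/fe/mapping_q1.h>
  #include <deal.II/lac/affine_constraints.h>
  #include <deal.II/matrix_free/matrix_free.h>

  using namespace dealii;

  // Set up a MatrixFree object with TBB task parallelism enabled (sketch).
  template <int dim>
  void setup_matrix_free(const DoFHandler<dim>           &dof_handler,
                         const AffineConstraints<double> &constraints,
                         const unsigned int               fe_degree,
                         MatrixFree<dim, double>         &matrix_free)
  {
    typename MatrixFree<dim, double>::AdditionalData additional_data;
    additional_data.tasks_parallel_scheme =
      MatrixFree<dim, double>::AdditionalData::partition_partition;

    matrix_free.reinit(MappingQ1<dim>(),
                       dof_handler,
                       constraints,
                       QGaussLobatto<1>(fe_degree + 1),
                       additional_data);
  }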

    Note that this program is designed to be run with a distributed triangulation (parallel::distributed::Triangulation), which requires deal.II to be configured with p4est as described in the deal.II ReadMe file. However, a non-distributed triangulation is also supported, in which case the computation will be run in serial.

    The test case

    In our example, we choose the initial value to be

\begin{eqnarray*}
 u(x,t) =
 \prod_{i=1}^{d} -4 \arctan \left(
 \frac{m}{\sqrt{1-m^2}}\frac{\sin\left(\sqrt{1-m^2} t +c_2\right)}{\cosh(mx_i+c_1)}\right)
\end{eqnarray*}

    and solve the equation over the time interval $[-10,10]$. The constants are chosen to be $c_1=c_2=0$ and $m=0.5$. As mentioned in step-25, in one dimension $u$ as a function of $t$ is the exact solution of the sine-Gordon equation. In higher dimensions, this is however not the case.

    The commented program

    The necessary files from the deal.II library.

  #include <deal.II/base/utilities.h>
    @@ -457,7 +457,7 @@
    STL namespace.

    SineGordonProblem::make_grid_and_dofs

    As in step-25 this function sets up a cube grid in dim dimensions of extent $[-15,15]$. We refine the mesh more in the center of the domain since the solution is concentrated there. We first refine all cells whose center is within a radius of 11, and then refine once more within a radius of 6. This simple ad hoc refinement could be done better by adapting the mesh to the solution using error estimators during the time stepping as done in other example programs, and using parallel::distributed::SolutionTransfer to transfer the solution to the new mesh.

      template <int dim>
      void SineGordonProblem<dim>::make_grid_and_dofs()
      {
    @@ -720,13 +720,13 @@

                         |   MF   |  SpMV   | dealii |   MF   | dealii
    2D, $\mathcal{Q}_2$  | 0.0106 | 0.00971 | 0.109  | 0.0243 | 0.124
    2D, $\mathcal{Q}_4$  | 0.0328 | 0.0706  | 0.528  | 0.0714 | 0.502
    3D, $\mathcal{Q}_2$  | 0.0151 | 0.0320  | 0.331  | 0.0376 | 0.364
    3D, $\mathcal{Q}_4$  | 0.0918 | 0.844   | 6.83   | 0.194  | 6.95

    It is apparent that the matrix-free code outperforms the standard assembly routines in deal.II by far. In 3D and for fourth order elements, one operator evaluation is also almost ten times as fast as a sparse matrix-vector product.

    Parallel run in 2D and 3D

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_49.html differs (HTML document, UTF-8 Unicode text, with very long lines)
    --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_49.html	2024-11-15 06:44:30.851684277 +0000
    +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_49.html	2024-11-15 06:44:30.851684277 +0000
    @@ -285,7 +285,7 @@

    (figure: regular input mesh and output mesh)

    Similarly, we can transform a regularly refined unit square to a wall-adapted mesh in y direction using the formula $(x,y) \mapsto (x,\tanh(2 y)/\tanh(2))$. This is done in grid_6() of this tutorial:

    (figure: regular input mesh and wall-adapted output mesh)
    @@ -454,7 +454,7 @@
    void merge_triangulations(const Triangulation< dim, spacedim > &triangulation_1, const Triangulation< dim, spacedim > &triangulation_2, Triangulation< dim, spacedim > &result, const double duplicated_vertex_tolerance=1.0e-12, const bool copy_manifold_ids=false, const bool copy_boundary_ids=false)

    grid_3: Moving vertices

    In this function, we move vertices of a mesh. This is simpler than one usually expects: if you ask a cell using cell->vertex(i) for the coordinates of its ith vertex, it doesn't just provide the location of this vertex but in fact a reference to the location where these coordinates are stored. We can then modify the value stored there.

    So this is what we do in the first part of this function: We create a square of geometry $[-1,1]^2$ with a circular hole with radius 0.25 located at the origin. We then loop over all cells and all vertices and if a vertex has a $y$ coordinate equal to one, we move it upward by 0.5.

    Note that this sort of procedure does not usually work this way because one will typically encounter the same vertices multiple times and may move them more than once. It works here because we select the vertices we want to use based on their geometric location, and a vertex moved once will fail this test in the future. A more general approach to this problem would have been to keep a std::set of those vertex indices that we have already moved (which we can obtain using cell->vertex_index(i)) and only move those vertices whose index isn't in the set yet, as sketched below.
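    A sketch of this std::set-based variant could look as follows (illustrative code, not the tutorial's own):

  #include <deal.II/grid/tria.h>

  #include <cmath>
  #include <set>

  using namespace dealii;

  // Move every vertex with y == 1 upward by 0.5, touching each vertex only
  // once even though it is visited from several adjacent cells.
  void move_top_vertices(Triangulation<2> &triangulation)
  {
    std::set<unsigned int> moved_vertices;
    for (const auto &cell : triangulation.active_cell_iterators())
      for (const unsigned int i : cell->vertex_indices())
        if (std::abs(cell->vertex(i)(1) - 1.0) < 1e-12 &&
            moved_vertices.insert(cell->vertex_index(i)).second)
          cell->vertex(i)(1) += 0.5;
  }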

      void grid_3()
      {
    @@ -494,8 +494,8 @@
     
    void extrude_triangulation(const Triangulation< 2, 2 > &input, const unsigned int n_slices, const double height, Triangulation< 3, 3 > &result, const bool copy_manifold_ids=false, const std::vector< types::manifold_id > &manifold_priorities={})

    grid_5: Demonstrating GridTools::transform, part 1

    This and the next example first create a mesh and then transform it by moving every node of the mesh according to a function that takes a point and returns a mapped point. In this case, we transform $(x,y) \mapsto (x,y+\sin(\pi x/5))$.

    GridTools::transform() takes a triangulation and an argument that can be called like a function taking a Point and returning a Point. There are different ways of providing such an argument: It could be a pointer to a function; it could be an object of a class that has an operator(); it could be a lambda function; or it could be anything that is described via a std::function<Point<2>(const Point<2>)> object.
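    For instance, a call with a lambda function might look like the following sketch (the enclosing function shift_mesh is made up for illustration):

  #include <deal.II/base/numbers.h>
  #include <deal.II/base/point.h>
  #include <deal.II/grid/grid_tools.h>
  #include <deal.II/grid/tria.h>

  #include <cmath>

  using namespace dealii;

  // Shift every mesh point vertically by sin(pi*x/5):
  void shift_mesh(Triangulation<2> &triangulation)
  {
    GridTools::transform(
      [](const Point<2> &p) {
        return Point<2>(p[0], p[1] + std::sin(numbers::PI * p[0] / 5.0));
      },
      triangulation);
  }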

    The decidedly more modern way is to use a lambda function that takes a Point and returns a Point, and that is what we do in the following:

      void grid_5()
    @@ -523,7 +523,7 @@
    static constexpr double PI
    Definition numbers.h:259
    ::VectorizedArray< Number, width > sin(const ::VectorizedArray< Number, width > &)

    grid_6: Demonstrating GridTools::transform, part 2

    In this second example of transforming points from an original to a new mesh, we will use the mapping $(x,y) \mapsto (x,\tanh(2y)/\tanh(2))$. To make things more interesting, rather than doing so in a single function as in the previous example, we here create an object with an operator() that will be called by GridTools::transform. Of course, this object may in reality be much more complex: the object may have member variables that play a role in computing the new locations of vertices.

      struct Grid6Func
      {
        double trans(const double y) const
        {
          return std::tanh(2 * y) / std::tanh(2);
        }
    @@ -798,9 +798,9 @@

    This creates the following mesh:

    This mesh has the right general shape, but the top cells are now polygonal: their edges are no longer along circles and we do not have a very accurate representation of the original geometry. The next step is to teach the top part of the domain that it should be curved. Put another way, all calculations done on the top boundary cells should be done in cylindrical coordinates rather than Cartesian coordinates. We can do this by creating a CylindricalManifold object and associating it with the cells above $y = 3$. This way, when we refine the cells on top, we will place new points along concentric circles instead of straight lines.

    In deal.II we describe all geometries with classes that inherit from Manifold. The default geometry is Cartesian and is implemented in the FlatManifold class. As the name suggests, Manifold and its inheriting classes provide a way to describe curves and curved cells in a general way with ideas and terminology from differential geometry: for example, CylindricalManifold inherits from ChartManifold, which describes a geometry through pull backs and push forwards. In general, one should think that the Triangulation class describes the topology of a domain (in addition, of course, to storing the locations of the vertices) while the Manifold classes describe the geometry of a domain (e.g., whether or not a pair of vertices lie along a circular arc or a straight line). A Triangulation will refine cells by doing computations with the Manifold associated with that cell regardless of whether or not the cell is on the boundary. Put another way: the Manifold classes do not need any information about where the boundary of the Triangulation actually is: it is up to the Triangulation to query the right Manifold for calculations on a cell. Most Manifold functions (e.g., Manifold::get_intermediate_point) know nothing about the domain itself and just assume that the points given to it lie along a geodesic. In this case, with the CylindricalManifold constructed below, the geodesics are arcs along circles orthogonal to the $z$-axis centered along the line $(0, 3, z)$.

    Since all three top parts of the domain use the same geodesics, we will mark all cells with centers above the $y = 3$ line as being cylindrical in nature:

    const Tensor<1, 3> axis({0.0, 0.0, 1.0});
    const Point<3> axial_point(0, 3.0, 0.0);
    const CylindricalManifold<3> cylinder(axis, axial_point);
    @@ -820,7 +820,7 @@

    With this code, we get a mesh that looks like this:

    This change fixes the boundary but creates a new problem: the cells adjacent to the cylinder's axis are badly distorted. We should use Cartesian coordinates for calculations on these central cells to avoid this issue. The cells along the center line all have a face that touches the line $(0, 3, z)$ so, to implement this, we go back and overwrite the manifold_ids on these cells to be zero (which is the default):

    const Tensor<1, 3> axis({0.0, 0.0, 1.0});
    const Point<3> axial_point(0, 3.0, 0.0);
    const CylindricalManifold<3> cylinder(axis, axial_point);
    @@ -852,7 +852,7 @@

    Possibilities for extensions

    Assigning different boundary ids

    It is often useful to assign different boundary ids to a mesh that is generated in one form or another as described in this tutorial to apply different boundary conditions.

    For example, you might want to apply a different boundary condition for the right boundary of the first grid in this program. To do this, iterate over the cells and their faces and identify the correct faces (for example using cell->center() to query the coordinates of the center of a cell as we do in step-1, or using cell->face(f)->get_boundary_id() to query the current boundary indicator of the $f$th face of the cell). You can then use cell->face(f)->set_boundary_id() to set the boundary id to something different, as in the sketch below. You can take a look back at step-1 to see how iteration over meshes is done there.
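    A sketch of such a loop follows; the function name and the test for $x = 1$ as the right boundary are assumptions for illustration, not part of the tutorial:

  #include <deal.II/grid/tria.h>

  #include <cmath>

  using namespace dealii;

  // Give every boundary face whose center lies at x == 1 its own boundary id:
  void mark_right_boundary(Triangulation<2> &triangulation)
  {
    for (const auto &cell : triangulation.active_cell_iterators())
      for (const unsigned int f : cell->face_indices())
        if (cell->face(f)->at_boundary() &&
            std::abs(cell->face(f)->center()[0] - 1.0) < 1e-12)
          cell->face(f)->set_boundary_id(1);
  }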

    Extracting a boundary mesh

    Computations on manifolds, as they are done in step-38, require a surface mesh embedded into a higher dimensional space. While some can be constructed using the GridGenerator namespace or loaded from a file, it is sometimes useful to extract a surface mesh from a volume mesh.

    Use the function GridGenerator::extract_boundary_mesh() to extract the surface elements of a mesh. Using the function on a 3d mesh (a Triangulation<3,3>, for example from grid_4()), this will return a Triangulation<2,3> that you can use in step-38. Also try extracting the boundary mesh of a Triangulation<2,2>.
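    A minimal sketch of such an extraction, using a ball as a stand-in volume mesh, could look like this:

  #include <deal.II/grid/grid_generator.h>
  #include <deal.II/grid/tria.h>

  using namespace dealii;

  // Extract the surface of a 3d ball as a Triangulation<2,3>, suitable for
  // the manifold computations of step-38.
  void make_surface_mesh()
  {
    Triangulation<3> volume_mesh;
    GridGenerator::hyper_ball(volume_mesh);

    Triangulation<2, 3> surface_mesh;
    GridGenerator::extract_boundary_mesh(volume_mesh, surface_mesh);
  }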

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_5.html differs (HTML document, UTF-8 Unicode text, with very long lines)
    --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_5.html	2024-11-15 06:44:30.883684563 +0000
    +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_5.html	2024-11-15 06:44:30.883684563 +0000
    @@ -149,39 +149,39 @@

    ("unstructured cell data") format: When this program was first written around 2000, the UCD format was what the AVS Explorer used – a program reasonably widely used at the time though today no longer of importance. The file format itself has survived and is still widely understood, but because GridIn reads so many different formats, the specific choice used in this tutorial program is perhaps not all that important.

    Solving a generalized Laplace (Poisson) equation

    The equation to solve here is as follows:

\begin{align*}
  -\nabla \cdot a(\mathbf x) \nabla u(\mathbf x) &= 1 \qquad\qquad & \text{in}\ \Omega,
  \\
  u &= 0 \qquad\qquad & \text{on}\ \partial\Omega.
\end{align*}

    If $a(\mathbf x)$ were a constant coefficient, this would simply be the Poisson equation that we have already solved in step-3 and step-4. However, if it is indeed spatially variable, it is a more complex equation (sometimes referred to as the "Poisson equation with a coefficient"). Specifically, we will here choose it as follows:

\begin{align*}
   a(\mathbf x) =
   \begin{cases}
     20 & \text{if}\ |\mathbf x|<0.5, \\
     1  & \text{otherwise.}
   \end{cases}
\end{align*}
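    In code, this coefficient could be implemented along the following lines (a sketch; the tutorial's actual implementation may differ in details):

  #include <deal.II/base/point.h>

  using namespace dealii;

  // The piecewise constant coefficient a(x) defined above:
  template <int dim>
  double coefficient(const Point<dim> &p)
  {
    if (p.square() < 0.5 * 0.5) // |x| < 0.5
      return 20.0;
    else
      return 1.0;
  }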

    Depending on what the variable $u$ refers to, it models a variety of situations with wide applicability:

    • If $u$ is the electric potential, then $-a\nabla u$ is the electric current in a medium and the coefficient $a$ is the conductivity of the medium at any given point. (In this situation, the right hand side of the equation would be the electric source density and would usually be zero or consist of localized, Delta-like, functions if specific points of the domain are connected to current sources that send electrons into or out of the domain.) In many media, $a=a(\mathbf x)$ is indeed spatially variable because the medium is not homogeneous. For example, in electrical impedance tomography, a biomedical imaging technique, one wants to image the body's interior by sending electric currents through the body between electrodes attached to the skin; in this case, $a(\mathbf x)$ describes the electrical conductivity of the different parts of the human body – so $a(\mathbf x)$ would be large for points $\mathbf x$ that lie in organs well supplied by blood (such as the heart), whereas it would be small for organs such as the lung that do not conduct electricity well (because air is a poor conductor). Similarly, if you are simulating an electronic device, $a(\mathbf x)$ would be large in parts of the volume occupied by conductors such as copper, gold, or aluminum; it would have intermediate values for parts of the volume occupied by semiconductors such as silicon; and it would be small in non-conducting and insulating parts of the volume (e.g., those occupied by air, or the circuit board on which the electronics are mounted).
    • If we are describing the vertical deflection $u$ of a thin membrane under a vertical force $f$, then $a$ would be a measure of the local stiffness of the membrane, which can be spatially variable if the membrane is made from different materials, or if the thickness of the membrane varies spatially. This is the interpretation of the equation that will allow us to interpret the images shown in the results section below.

Since the Laplace/Poisson equation appears in so many contexts, there are of course many more uses than just the two listed above, each providing a different interpretation of what a spatially variable coefficient means in that context.

What you should have taken away from this is that equations with spatially variable coefficients in the differential operator are quite common, and indeed quite useful in describing the world around us. As a consequence, we should be able to reflect such cases in the numerical methods we use. It turns out that it is not entirely obvious how to deal with such spatially variable coefficients in finite difference methods (though it is also not too complicated to come up with ways to do that systematically). But we are using finite element methods, and for these it is entirely trivial to incorporate such coefficients: you just do what you always do, namely multiply by a test function, then integrate by parts. This yields the weak form, which here reads as follows:

\begin{align*}
   \int_\Omega a(\mathbf x) \nabla \varphi(\mathbf x) \cdot
             \nabla u(\mathbf x) \; dx
   &=
   \int_\Omega \varphi(\mathbf x) f(\mathbf x) \; dx \qquad \qquad \forall \varphi.
\end{align*}

    For this program here, we will specifically use $f(\mathbf x)=1$. In our usual short-hand notation, the equation's weak form can then be written as

\begin{align*}
   (a \nabla \varphi, \nabla u) &= (\varphi, 1) \qquad \qquad \forall \varphi.
\end{align*}

As you will recall from step-3 and step-4, the weak formulation is implemented in the assemble_system function, approximating the integrals by quadrature. Indeed, what you will find in this program is that, as before, the implementation follows immediately from the statement of the weak form above.
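In sketch form (not this program's verbatim code; `coefficient(p)` is a hypothetical helper returning $a(\mathbf x)$ at the point `p`), the corresponding quadrature loop could look like this:

  // A minimal sketch of how the variable coefficient enters the usual
  // assembly loop over quadrature points; everything else is unchanged
  // compared to the constant-coefficient case.
  for (const unsigned int q : fe_values.quadrature_point_indices())
    {
      const double a_q = coefficient(fe_values.quadrature_point(q)); // a(x_q)
      for (const unsigned int i : fe_values.dof_indices())
        {
          for (const unsigned int j : fe_values.dof_indices())
            cell_matrix(i, j) += a_q *                        // a(x_q)
                                 fe_values.shape_grad(i, q) * // grad phi_i
                                 fe_values.shape_grad(j, q) * // grad phi_j
                                 fe_values.JxW(q);            // dx
          cell_rhs(i) += fe_values.shape_value(i, q) * // phi_i
                         1.0 *                         // f(x) = 1
                         fe_values.JxW(q);             // dx
        }
    }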

    Support for debugging: Assertions

/usr/share/doc/packages/dealii/doxygen/deal.II/step_50.html differs (HTML document, UTF-8 Unicode text, with very long lines)

\begin{align*}
 (\epsilon \nabla u, \nabla v) = (f,v) \quad \forall v \in V_h
\end{align*}

on the domain $\Omega = [-1,1]^\text{dim} \setminus [0,1]^\text{dim}$ (an L-shaped domain for 2D and a Fichera corner for 3D) with $\epsilon = 1$ if $\min(x,y,z)>-\frac{1}{2}$ and $\epsilon = 100$ otherwise. In other words, $\epsilon$ is small along the edges or faces of the domain that run into the reentrant corner, as will be visible in the figure below.

The boundary conditions are $u=0$ on the whole boundary and the right-hand side is $f=1$. We use continuous $Q_2$ elements for the discrete finite element space $V_h$, and use a residual-based, cell-wise a posteriori error estimator $e(K) = e_{\text{cell}}(K) + e_{\text{face}}(K)$ from [karakashian2003posteriori] with

    \begin{align*}
  e_{\text{cell}}(K) &= h^2 \| f + \epsilon \triangle u \|_K^2, \\
  e_{\text{face}}(K) &= \sum_F h_F \| \jump{ \epsilon \nabla u \cdot n } \|_F^2,
 \end{align*}

to adaptively refine the mesh. (This is a generalization of the Kelly error estimator used in the KellyErrorEstimator class that drives mesh refinement in most of the other tutorial programs.) The following figure visualizes the solution and refinement for 2D: In 3D, the solution looks similar (see below). On the left you can see the solution and on the right we show a slice for $x$ close to the center of the domain showing the adaptively refined mesh.
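For orientation, the plain Kelly indicator that the estimator above generalizes is typically invoked as in most tutorials; a hedged sketch with placeholder names for `dof_handler`, `fe`, and `solution`:

  // Compute one error indicator per active cell from the jumps of the
  // solution gradient across faces; the result then drives refinement.
  Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
  KellyErrorEstimator<dim>::estimate(dof_handler,
                                     QGauss<dim - 1>(fe.degree + 1),
                                     {}, // no Neumann boundary data
                                     solution,
                                     estimated_error_per_cell);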


    For the active mesh, we use the parallel::distributed::Triangulation class as done in step-40 which uses functionality in the external library p4est for the distribution of the active cells among processors. For the non-active cells in the multilevel hierarchy, deal.II implements what we will refer to as the "first-child rule" where, for each cell in the hierarchy, we recursively assign the parent of a cell to the owner of the first child cell. The following figures give an example of such a distribution. Here the left image represents the active cells for a sample 2D mesh partitioned using a space-filling curve (which is what p4est uses to partition cells); the center image gives the tree representation of the active mesh; and the right image gives the multilevel hierarchy of cells. The colors and numbers represent the different processors. The circular nodes in the tree are the non-active cells which are distributed using the "first-child rule".

    Included among the output to screen in this example is a value "Partition efficiency" given by one over MGTools::workload_imbalance(). This value, which will be denoted by $\mathbb{E}$, quantifies the overhead produced by not having a perfect work balance on each level of the multigrid hierarchy. This imbalance is evident from the example above: while level $\ell=2$ is about as well balanced as is possible with four cells among three processors, the coarse level $\ell=0$ has work for only one processor, and level $\ell=1$ has work for only two processors of which one has three times as much work as the other.

For defining $\mathbb{E}$, it is important to note that, as we are using local smoothing to define the multigrid hierarchy (see the multigrid paper for a description of local smoothing), the refinement level of a cell corresponds to that cell's multigrid level. Now, let $N_{\ell}$ be the number of cells on level $\ell$ (both active and non-active cells) and $N_{\ell,p}$ be the subset owned by process $p$. We will also denote by $P$ the total number of processors. Assuming that the workload for any one processor is proportional to the number of cells owned by that processor, the optimal workload per processor is given by

\begin{align*}
 W_{\text{opt}} = \frac1{P}\sum_{\ell} N_{\ell} = \sum_{\ell}\left(\frac1{P}\sum_{p}N_{\ell,p}\right).
\end{align*}
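A hedged sketch (ours, not the library's implementation; the real thing is MGTools::workload_imbalance(), whose reciprocal this mirrors) of the efficiency computation this definition implies: on each level every process must wait for the busiest one, so the actual workload sums the per-level maxima. The array layout `cells_per_level_and_process[l][p]` $= N_{\ell,p}$ is a hypothetical input format.

  #include <algorithm>
  #include <numeric>
  #include <vector>

  double partition_efficiency(
    const std::vector<std::vector<unsigned int>> &cells_per_level_and_process)
  {
    const double P = cells_per_level_and_process.front().size();
    double W_opt = 0., W = 0.;
    for (const auto &N_l : cells_per_level_and_process) // loop over levels l
      {
        W_opt += std::accumulate(N_l.begin(), N_l.end(), 0.) / P; // (1/P) sum_p N_{l,p}
        W += *std::max_element(N_l.begin(), N_l.end());           // max_p N_{l,p}
      }
    return W_opt / W; // \mathbb{E}; equals 1 for perfect balance
  }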

These considerations are discussed in much greater detail in [clevenger_par_gmg], which contains a full discussion of the partition efficiency model and the effect the imbalance has on the GMG V-cycle timing. In summary, the value of $\mathbb{E}$ is highly dependent on the degree of local mesh refinement used and has an optimal value $\mathbb{E} \approx 1$ for globally refined meshes. Typically for adaptively refined meshes, the number of processors used to distribute a single mesh has a negative impact on $\mathbb{E}$ but only up to a leveling off point, where the imbalance remains relatively constant for an increasing number of processors, and further refinement has very little impact on $\mathbb{E}$. Finally, $1/\mathbb{E}$ was shown to give an accurate representation of the slowdown in parallel scaling expected for the timing of a V-cycle.

    It should be noted that there is potential for some asynchronous work between multigrid levels, specifically with purely nearest neighbor MPI communication, and an adaptive mesh could be constructed such that the efficiency model would far overestimate the V-cycle slowdown due to the asynchronous work "covering up" the imbalance (which assumes synchronization over levels). However, for most realistic adaptive meshes the expectation is that this asynchronous work will only cover up a very small portion of the imbalance and the efficiency model will describe the slowdown very well.

    Workload imbalance for algebraic multigrid methods

The considerations above show that one has to expect certain limits on the scalability of the geometric multigrid algorithm as it is implemented in deal.II because even in cases where the finest levels of a mesh are perfectly load balanced, the coarser levels may not be. At the same time, the coarser levels are weighted less (the contributions of $W_\ell$ to $W$ are small) because coarser levels have fewer cells and, consequently, do not contribute to the overall run time as much as finer levels. In other words, imbalances in the coarser levels may not lead to large effects in the big picture.

Algebraic multigrid methods are of course based on an entirely different approach to creating a hierarchy of levels. In particular, they create these purely based on analyzing the system matrix, and very sophisticated algorithms for ensuring that the problem is well load-balanced on every level are implemented in both the hypre and ML/MueLu packages that underlie the TrilinosWrappers::PreconditionAMG and PETScWrappers::PreconditionBoomerAMG classes. In some sense, these algorithms are simpler than for geometric multigrid methods because they only deal with the matrix itself, rather than all of the connotations of meshes, neighbors, parents, and other geometric entities. At the same time, much work has also been put into making algebraic multigrid methods scale to very large problems, including questions such as reducing the number of processors that work on a given level of the hierarchy to a subset of all processors, if otherwise processors would spend less time on computations than on communication. (One might note that it is of course possible to implement these same kinds of ideas also in geometric multigrid algorithms where one purposefully idles some processors on coarser levels to reduce the amount of communication. deal.II just doesn't do this at this time.)

    These are not considerations we typically have to worry about here, however: For most purposes, we use algebraic multigrid methods as black-box methods.

    Running the program


    The result is a function that is similar to the one found in the "Use FEEvaluation::read_dof_values_plain() to avoid resolving constraints" subsection in the "Possibilities for extensions" section of step-37.

The reason for this function is that the MatrixFree operators do not take into account non-homogeneous Dirichlet constraints, instead treating all Dirichlet constraints as homogeneous. To account for this, the right-hand side here is assembled as the residual $r_0 = f-Au_0$, where $u_0$ is a zero vector except in the Dirichlet values. Then when solving, we have that the solution is $u = u_0 + A^{-1}r_0$. This can be seen as a Newton iteration on a linear system with initial guess $u_0$. The CG solve in the solve() function below computes $A^{-1}r_0$ and the call to constraints.distribute() (which directly follows) adds the $u_0$.
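A hedged sketch (placeholder names, not the tutorial's verbatim solve() body) of the solve-then-distribute pattern just described: CG computes $A^{-1}r_0$ starting from a zero guess, and AffineConstraints::distribute() then fills in the Dirichlet values $u_0$.

  // VectorType, system_matrix, right_hand_side, preconditioner, and
  // constraints are assumed to exist with the obvious meanings.
  SolverControl       solver_control(1000, 1e-10 * right_hand_side.l2_norm());
  SolverCG<VectorType> cg(solver_control);
  solution = 0.;
  cg.solve(system_matrix, solution, right_hand_side, preconditioner); // A^{-1} r_0
  constraints.distribute(solution); // adds u_0 on the constrained DoFs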

Obviously, since we are considering a problem with zero Dirichlet boundary, we could have taken a similar approach to the one in step-37's assemble_rhs(), but this additional work allows us to change the problem declaration if we so choose.

This function has two parts in the integration loop: applying the negative of matrix $A$ to $u_0$ by submitting the negative of the gradient, and adding the right-hand side contribution by submitting the value $f$. We must be sure to use read_dof_values_plain() for evaluating $u_0$ as read_dof_values() would set all Dirichlet values to zero.

Finally, the system_rhs vector is of type LA::MPI::Vector, but the MatrixFree class only works with LinearAlgebra::distributed::Vector. Therefore we must compute the right-hand side using MatrixFree functionality and then use the functions in the ChangeVectorType namespace to copy it to the correct type.

      template <int dim, int degree>
      void LaplaceProblem<dim, degree>::assemble_rhs()
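The listing breaks off after the signature here; in sketch form (placeholder names such as `solution_with_dirichlet` for $u_0$, ignoring the variable coefficient for brevity, and with $f=1$), the integration loop described above could read:

  // Assemble r_0 = f - A u_0 with FEEvaluation: read u_0 without resolving
  // constraints, submit -grad(u_0) for the -A u_0 part and the value f = 1
  // for the right-hand side, then integrate both contributions at once.
  FEEvaluation<dim, degree> phi(matrix_free);
  for (unsigned int cell = 0; cell < matrix_free.n_cell_batches(); ++cell)
    {
      phi.reinit(cell);
      phi.read_dof_values_plain(solution_with_dirichlet); // keeps Dirichlet values
      phi.evaluate(EvaluationFlags::gradients);
      for (unsigned int q = 0; q < phi.n_q_points; ++q)
        {
          phi.submit_gradient(-phi.get_gradient(q), q);             // -A u_0
          phi.submit_value(make_vectorized_array<double>(1.0), q);  // f = 1
        }
      phi.integrate(EvaluationFlags::values | EvaluationFlags::gradients);
      phi.distribute_local_to_global(right_hand_side);
    }
  right_hand_side.compress(VectorOperation::add);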

On the other hand, the algebraic multigrid in the last set of columns is relatively unaffected by the increasing imbalance of the mesh hierarchy (because it doesn't use the mesh hierarchy) and the growth in time is rather driven by other factors that are well documented in the literature (most notably that the algorithmic complexity of some parts of algebraic multigrid methods appears to be ${\cal O}(N \log N)$ instead of ${\cal O}(N)$ for geometric multigrid).

The upshot of the table above is that the matrix-free geometric multigrid method appears to be the fastest approach to solving this equation, if not by a huge margin. Matrix-based methods, on the other hand, are consistently the worst.

The following figure provides strong scaling results for each method, i.e., we solve the same problem on more and more processors. Specifically, we consider the problems after 16 mesh refinement cycles (32M DoFs) and 19 cycles (256M DoFs), on between 56 and 28,672 processors:

/usr/share/doc/packages/dealii/doxygen/deal.II/step_51.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    Introduction

This tutorial program presents the implementation of a hybridizable discontinuous Galerkin method for the convection-diffusion equation.

    Hybridizable discontinuous Galerkin methods

One common argument against the use of discontinuous Galerkin elements is the large number of globally coupled degrees of freedom that one must solve in an implicit system. This is because, unlike continuous finite elements, in typical discontinuous elements there is one degree of freedom at each vertex for each of the adjacent elements, rather than just one, and similarly for edges and faces. As an example of how fast the number of unknowns grows, consider the FE_DGPMonomial basis: each scalar solution component is represented by polynomials of degree $p$ with $(1/\text{dim}!) \prod_{i=1}^{\text{dim}}(p+i)$ degrees of freedom per element. Typically, all degrees of freedom in an element are coupled to all of the degrees of freedom in the adjacent elements. The resulting discrete equations yield very large linear systems very quickly, especially for systems of equations in 2 or 3 dimensions.

    Reducing the size of the linear system

    To alleviate the computational cost of solving such large linear systems, the hybridizable discontinuous Galerkin (HDG) methodology was introduced by Cockburn and co-workers (see the references in the recent HDG overview article by Nguyen and Peraire [Ngu2012]).

    The HDG method achieves this goal by formulating the mathematical problem using Dirichlet-to-Neumann mappings. The partial differential equations are first written as a first order system, and each field is then discretized via a DG method. At this point, the single-valued "trace" values on the skeleton of the mesh, i.e., element faces, are taken to be independent unknown quantities. This yields unknowns in the discrete formulation that fall into two categories:

\begin{eqnarray*}
 A U &=& F - B \Lambda.
\end{eqnarray*}

The point is that the presence of $A^{-1}$ is not a problem because $A$ is a block diagonal matrix where each block corresponds to one cell and is therefore easy enough to invert. The coupling to other cells is introduced by the matrices $B$ and $C$ over the skeleton variable. The block-diagonality of $A$ and the structure in $B$ and $C$ allow us to invert the matrix $A$ element by element (the local solution of the Dirichlet problem) and subtract $CA^{-1}B$ from $D$. The steps in the Dirichlet-to-Neumann map concept hence correspond to

1. constructing the Schur complement matrix $D-C A^{-1} B$ and right hand side $G - C A^{-1} F$ locally on each cell and inserting the contribution into the global trace matrix in the usual way (a sketch of this local computation follows below),
2. solving the resulting global system for $\Lambda$, and
3. solving for $U$ using the second equation, given $\Lambda$.
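The sketch referenced in step 1, using deal.II's FullMatrix and Vector classes; the objects $A, B, C, D, F, G$ are hypothetical per-cell blocks in the notation of the introduction, not variables of the tutorial program:

  // Invert the block-diagonal local-local matrix in place, then form the
  // Schur complement D - C A^{-1} B and the condensed right hand side
  // G - C A^{-1} F for this cell.
  FullMatrix<double> A_inv(A);
  A_inv.gauss_jordan();

  FullMatrix<double> C_Ainv(C.m(), A_inv.n());
  C.mmult(C_Ainv, A_inv);               // C A^{-1}

  FullMatrix<double> tmp(C.m(), B.n());
  C_Ainv.mmult(tmp, B);                 // C A^{-1} B
  FullMatrix<double> schur(D);
  schur.add(-1., tmp);                  // D - C A^{-1} B

  Vector<double> tmp_vec(C.m());
  C_Ainv.vmult(tmp_vec, F);             // C A^{-1} F
  Vector<double> condensed_rhs(G);
  condensed_rhs.add(-1., tmp_vec);      // G - C A^{-1} F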

        Solution quality and rates of convergence

Another criticism of traditional DG methods is that the approximate fluxes converge suboptimally. The local HDG solutions can be shown to converge as $\mathcal{O}(h^{p+1})$, i.e., at optimal order. Additionally, a super-convergence property can be used to post-process a new approximate solution that converges at the rate $\mathcal{O}(h^{p+2})$.

        Alternative approaches

        The hybridizable discontinuous Galerkin method is only one way in which the problems of the discontinuous Galerkin method can be addressed. Another idea is what is called the "weak Galerkin" method. It is explored in step-61.

        HDG applied to the convection-diffusion problem

        The HDG formulation used for this example is taken from
        N.C. Nguyen, J. Peraire, B. Cockburn: An implicit high-order hybridizable discontinuous Galerkin method for linear convection–diffusion equations, Journal of Computational Physics, 2009, 228:9, 3232-3254. [DOI]

We consider the convection-diffusion equation over the domain $\Omega$ with Dirichlet boundary $\partial \Omega_D$ and Neumann boundary $\partial \Omega_N$:

\begin{eqnarray*}
 \nabla \cdot (\mathbf{c} u) - \nabla \cdot (\kappa \nabla u) &=& f,
 \quad \text{ in } \Omega, \\
 u &=& g_D,
 \quad \text{ on } \partial \Omega_D, \\
 (\mathbf{c} u - \kappa \nabla u) \cdot \mathbf{n} &=& g_N,
 \quad \text{ on }  \partial \Omega_N.
\end{eqnarray*}

We multiply these equations by the weight functions $\mathbf{v}, w$ and integrate by parts over every element $K$ to obtain:

\begin{eqnarray*}
   (\mathbf{v}, \kappa^{-1} \mathbf{q})_K - (\nabla\cdot\mathbf{v}, u)_K
     + \left<\mathbf{v}\cdot\mathbf{n}, {\hat{u}}\right>_{\partial K} &=& 0,
\end{eqnarray*}

        The variable $\hat {u}$ is introduced as an additional independent variable and is the one for which we finally set up a globally coupled linear system. As mentioned above, it is defined on the element faces and discontinuous from one face to another wherever faces meet (at vertices in 2d, and at edges and vertices in 3d). Values for $u$ and $\mathbf{q}$ appearing in the numerical trace function are taken to be the cell's interior solution restricted to the boundary $\partial K$.

The local stabilization parameter $\tau$ has effects on stability and accuracy of HDG solutions; see the literature for a further discussion. A stabilization parameter of unity is reported to be the choice which gives best results. A stabilization parameter $\tau$ that tends to infinity prohibits jumps in the solution over the element boundaries, making the HDG solution approach the approximation with continuous finite elements. In the program below, we choose the stabilization parameter as

\begin{eqnarray*}
   \tau = \frac{\kappa}{\ell} + |\mathbf{c} \cdot \mathbf{n}|
\end{eqnarray*}

The Dirichlet boundary condition is imposed on the skeleton variable as

\begin{equation*}
   \hat{u}|_{\partial \Omega_D} = g_D,
\end{equation*}

where the equal sign actually means an $L_2$ projection of the boundary function $g$ onto the space of the face variables (e.g. linear functions on the faces). This constraint is then applied to the skeleton variable $\hat{u}$ using inhomogeneous constraints by the method VectorTools::project_boundary_values.

Summing the elemental contributions across all elements in the triangulation, enforcing the normal component of the numerical flux, and integrating by parts on the equation weighted by $w$, we arrive at the final form of the problem: Find $(\mathbf{q}_h, u_h, \hat{u}_h) \in
\mathcal{V}_h^p \times \mathcal{W}_h^p \times \mathcal{M}_h^p$ such that

\begin{align*}
   (\mathbf{v}, \kappa^{-1} \mathbf{q}_h)_{\mathcal{T}} - \ldots
\end{align*}

We use the notation $(\cdot, \cdot)_{\mathcal{T}} = \sum_K (\cdot, \cdot)_K$ to denote the sum of integrals over all cells and $\left<\cdot, \cdot\right>_{\partial \mathcal{T}} = \sum_K \left<\cdot, \cdot\right>_{\partial K}$ to denote integration over all faces of all cells, i.e., interior faces are visited twice, once from each side and with the corresponding normal vectors. When combining the contribution from both elements sharing a face, the above equation yields terms familiar from the DG method, with jumps of the solution over the cell boundaries.

In the equation above, the space $\mathcal {W}_h^{p}$ for the scalar variable $u_h$ is defined as the space of functions that are tensor product polynomials of degree $p$ on each cell and discontinuous over the element boundaries $\mathcal Q_{-p}$, i.e., the space described by FE_DGQ<dim>(p). The space for the gradient or flux variable $\mathbf{q}_i$ is a vector element space where each component is a locally polynomial and discontinuous $\mathcal Q_{-p}$. In the code below, we collect these two local parts together in one FESystem where the first dim components denote the gradient part and the last scalar component corresponds to the scalar variable. For the skeleton component $\hat{u}_h$, we define a space that consists of discontinuous tensor product polynomials that live on the element faces, which in deal.II is implemented by the class FE_FaceQ. This space is otherwise similar to FE_DGQ, i.e., the solution function is not continuous between two neighboring faces, see also the results section below for an illustration.
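In code, the two spaces just described can be built as follows (a short sketch consistent with the description above, with `degree` standing for the polynomial degree $p$; variable names are ours):

  // The local (q_h, u_h) space: dim discontinuous vector components for the
  // gradient plus one scalar component, all FE_DGQ of degree p; and the
  // skeleton space of face polynomials for the trace variable.
  FESystem<dim>  fe_local(FE_DGQ<dim>(degree), dim + 1);
  FE_FaceQ<dim>  fe_skeleton(degree);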

        In the weak form given above, we can note the following coupling patterns:

1. The matrix $A$ consists of local-local coupling terms. These arise when the local weighting functions $(\mathbf{v}, w)$ multiply the local solution terms $(\mathbf{q}_h, u_h)$. Because the elements are discontinuous, $A$ is block diagonal.
2. The matrix $B$ represents the local-face coupling. These are the terms with weighting functions $(\mathbf{v}, w)$ multiplying the skeleton variable $\hat{u}_h$.
3. The matrix $C$ represents the face-local coupling, which involves the weighting function $\mu$ multiplying the local solutions $(\mathbf{q}_h, u_h)$.
4. The matrix $D$ is the face-face coupling; terms involve both $\mu$ and $\hat{u}_h$.

        Post-processing and super-convergence

One special feature of the HDG methods is that they typically allow for constructing an enriched solution that gains accuracy. This post-processing takes the HDG solution in an element-by-element fashion and combines it such that one can get $\mathcal O(h^{p+2})$ order of accuracy when using polynomials of degree $p$. For this to happen, there are two necessary ingredients:

1. The computed solution gradient $\mathbf{q}_h$ converges at optimal rate, i.e., $\mathcal{O}(h^{p+1})$.
2. The cell-wise average of the scalar part of the solution, $\frac{(1,u_h)_K}{\text{vol}(K)}$, super-converges at rate $\mathcal{O}(h^{p+2})$.

          Since we test by the whole set of basis functions in the space of tensor product polynomials of degree $p+1$ in the second set of equations, this is an overdetermined system with one more equation than unknowns. We fix this in the code below by omitting one of these equations (since the rows in the Laplacian are linearly dependent when representing a constant function). As we will see below, this form of the post-processing gives the desired super-convergence result with rate $\mathcal {O}(h^{p+2})$. It should be noted that there is some freedom in constructing $u_h^*$ and this minimization approach to extract the information from the gradient is not the only one. In particular, the post-processed solution defined here does not satisfy the convection-diffusion equation in any sense. As an alternative, the paper by Nguyen, Peraire and Cockburn cited above suggests another somewhat more involved formula for convection-diffusion that can also post-process the flux variable into an $H(\Omega,\mathrm{div})$-conforming variant and better represents the local convection-diffusion operator when the diffusion is small. We leave the implementation of a more sophisticated post-processing as a possible extension to the interested reader.

          Note that for vector-valued problems, the post-processing works similarly. One simply sets the constraint for the mean value of each vector component separately and uses the gradient as the main source of information.

          Problem specific data

For this tutorial program, we consider almost the same test case as in step-7. The computational domain is $\Omega \dealcoloneq [-1,1]^d$ and the exact solution corresponds to the one in step-7, except for a scaling. We use the following source centers $x_i$ for the exponentials

• 1D: $\{x_i\}^1 = \{ -\frac{1}{3}, 0, \frac{1}{3} \}$,

    HDG::assemble_system_one_cell

The real work of the HDG program is done by assemble_system_one_cell. Assembling the local matrices $A, B, C$ is done here, along with the local contributions of the global matrix $D$.

      template <int dim>
      void HDG<dim>::assemble_system_one_cell(
      const typename DoFHandler<dim>::active_cell_iterator &cell,

    Once assembly of all of the local contributions is complete, we must either: (1) assemble the global system, or (2) compute the local solution values and save them. In either case, the first step is to invert the local-local matrix.

      scratch.ll_matrix.gauss_jordan();
     
For (1), we compute the Schur complement and add it to the cell_matrix, matrix $D$ in the introduction.

  if (task_data.trace_reconstruct == false)
    {
      scratch.fl_matrix.mmult(scratch.tmp_matrix, scratch.ll_matrix);
cells dofs  val L2    rate  grad L2   rate  val L2-post rate
2304  18816 2.956e-05 3.96  2.104e-04 3.97  5.750e-07   5.01
4096  33280 9.428e-06 3.97  6.697e-05 3.98  1.362e-07   5.01
9216  74496 1.876e-06 3.98  1.330e-05 3.99  1.788e-08   5.01
One can see the error reduction upon grid refinement, and for the cases where global refinement was performed, also the convergence rates. The quadratic convergence rates of Q1 elements in the $L_2$ norm for both the scalar variable and the gradient variable are apparent, as is the cubic rate for the postprocessed scalar variable in the $L_2$ norm. Note this distinctive feature of an HDG solution. In typical continuous finite elements, the gradient of the solution of order $p$ converges at rate $p$ only, as opposed to $p+1$ for the actual solution. Even though superconvergence results for finite elements are also available (e.g. superconvergent patch recovery first introduced by Zienkiewicz and Zhu), these are typically limited to structured meshes and other special cases. For Q3 HDG variables, the scalar variable and gradient converge at fourth order and the postprocessed scalar variable at fifth order.

    The same convergence rates are observed in 3d.

    Q1 elements, adaptive refinement:
    cells dofs val L2 grad L2 val L2-post
    8 144 7.122e+00 1.941e+01 6.102e+00

    Comparison with continuous finite elements

    Results for 2D

The convergence tables verify the expected convergence rates stated in the introduction. Now, we want to show a quick comparison of the computational efficiency of the HDG method compared to a usual finite element (continuous Galerkin) method on the problem of this tutorial. Of course, stability aspects of the HDG method compared to continuous finite elements for transport-dominated problems are also important in practice, which is an aspect not seen on a problem with smooth analytic solution. In the picture below, we compare the $L_2$ error as a function of the number of degrees of freedom (left) and of the computing time spent in the linear solver (right) for two space dimensions of continuous finite elements (CG) and the hybridized discontinuous Galerkin method presented in this tutorial. As opposed to the tutorial where we only use unpreconditioned BiCGStab, the times shown in the figures below use the Trilinos algebraic multigrid preconditioner in TrilinosWrappers::PreconditionAMG. For the HDG part, a wrapper around ChunkSparseMatrix for the trace variable has been used in order to utilize the block structure in the matrix on the finest level.


The results are in line with properties of DG methods in general: Best performance is typically not achieved for linear elements, but rather at somewhat higher order, usually around $p=3$. This is because of a volume-to-surface effect for discontinuous solutions with too much of the solution living on the surfaces and hence duplicating work when the elements are linear. Put in other words, DG methods are often most efficient when used at relatively high order, despite their focus on a discontinuous (and hence, seemingly less accurate) representation of solutions.

    Results for 3D

We now show the same figures in 3D: The first row shows the number of degrees of freedom and computing time versus the $L_2$ error in the scalar variable $u$ for CG and HDG at order $p$, the second row shows the post-processed HDG solution instead of the original one, and the third row compares the post-processed HDG solution with CG at order $p+1$. In 3D, the volume-to-surface effect makes the cost of HDG somewhat higher and the CG solution is clearly better than HDG for linears by any metric. For cubics, HDG and CG are of similar quality, whereas HDG is again more efficient for sixth order polynomials. One can alternatively also use the combination of FE_DGP and FE_FaceP instead of (FE_DGQ, FE_FaceQ), which do not use tensor product polynomials of degree $p$ but Legendre polynomials of complete degree $p$. There are fewer degrees of freedom on the skeleton variable for FE_FaceP for a given mesh size, but the solution quality (error vs. number of DoFs) is very similar to the results for FE_FaceQ.

/usr/share/doc/packages/dealii/doxygen/deal.II/step_52.html differs (HTML document, UTF-8 Unicode text, with very long lines)

\begin{eqnarray*}
 \frac{1}{v}\frac{\partial \phi(x,t)}{\partial t} = \nabla \cdot D(x) \nabla \phi(x,t)
 - \Sigma_a(x) \phi(x,t) + S(x,t)
\end{eqnarray*}

augmented by appropriate boundary conditions. Here, $v$ is the velocity of neutrons (for simplicity we assume it is equal to 1 which can be achieved by simply scaling the time variable), $D$ is the diffusion coefficient, $\Sigma_a$ is the absorption cross section, and $S$ is a source. Because we are only interested in the time dependence, we assume that $D$ and $\Sigma_a$ are constant.

    Since this program only intends to demonstrate how to use advanced time stepping algorithms, we will only look for the solutions of relatively simple problems. Specifically, we are looking for a solution on a square domain $[0,b]\times[0,b]$ of the form

\begin{eqnarray*}
 \phi(x,t) = A\sin(\omega t)(bx-x^2).
\end{eqnarray*}

Inserting this ansatz into the equation (with $v=1$) shows that it is satisfied if the source term is chosen as

\begin{eqnarray*}
 S(x,t) = A\left(\omega\cos(\omega t)(bx-x^2) + \sin(\omega t)
 \left(\Sigma_a (bx-x^2)+2D\right) \right).
\end{eqnarray*}

Because the solution is a sine in time, we know that the exact solution satisfies $\phi\left(x,\frac{\pi}{\omega}\right) = 0$. Therefore, the error at time $t=\frac{\pi}{\omega}$ is simply the norm of the numerical solution, i.e., $\|e(\cdot,t=\frac{\pi}{\omega})\|_{L_2} = \|\phi_h(\cdot,t=\frac{\pi}{\omega})\|_{L_2}$, and is particularly easily evaluated. In the code, we evaluate the $l_2$ norm of the vector of nodal values of $\phi_h$ instead of the $L_2$ norm of the associated spatial function, since the former is simpler to compute; however, on uniform meshes, the two are just related by a constant and we can consequently observe the temporal convergence order with either.

    Runge-Kutta methods

    The Runge-Kutta methods implemented in deal.II assume that the equation to be solved can be written as:

\begin{eqnarray*}
   \frac{dy}{dt} = g(t,y).
\end{eqnarray*}

If we multiply an equation of the form

\begin{eqnarray*}
   \frac{\partial u(x,t)}{\partial t} = q(t,u(x,t))
\end{eqnarray*}

by test functions, integrating over $\Omega$, substituting $u\rightarrow u_h$ and restricting the test functions to the $\varphi_i(x)$ from above, then this spatially discretized equation has the form

    \begin{eqnarray*}
 M\frac{dU}{dt} = f(t,U),
 \end{eqnarray*}

where $M$ is the mass matrix and $f(t,U)$ is the spatially discretized version of $q(t,u(x,t))$ (where $q$ is typically the place where spatial derivatives appear, but this is not of much concern for the moment given that we only consider time derivatives). In other words, this form fits the general scheme above if we write

\begin{eqnarray*}
 \frac{dy}{dt} = g(t,y) = M^{-1}f(t,y).
\end{eqnarray*}

Runge-Kutta methods then compute the solution at the next time step as $y_{n+1} = y_n + \sum_{i=1}^s b_i k_i$ with stages

\begin{eqnarray*}
 k_i = \Delta t \, M^{-1} f\left(t_n+c_ih,y_n+\sum_{j=1}^sa_{ij}k_j\right).
\end{eqnarray*}

Here $a_{ij}$, $b_i$, and $c_i$ are known coefficients that identify which particular Runge-Kutta scheme you want to use, and $\Delta t=t_{n+1}-t_n$ is the time step used. Different time stepping methods of the Runge-Kutta class differ in the number of stages $s$ and the values they use for the coefficients $a_{ij}$, $b_i$, and $c_i$ but are otherwise easy to implement since one can look up tabulated values for these coefficients. (These tables are often called Butcher tableaus.)
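As a concrete example of such a tableau (a standard reference fact, not taken from this page): the classical fourth-order Runge-Kutta method has $s=4$ stages, with the $c_i$ in the left column, the $a_{ij}$ in the body, and the $b_i$ in the bottom row:

\begin{array}{c|cccc}
 0 & & & & \\
 \frac{1}{2} & \frac{1}{2} & & & \\
 \frac{1}{2} & 0 & \frac{1}{2} & & \\
 1 & 0 & 0 & 1 & \\
 \hline
 & \frac{1}{6} & \frac{1}{3} & \frac{1}{3} & \frac{1}{6}
\end{array}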

At the time of the writing of this tutorial, the methods implemented in deal.II can be divided into three categories:

1. Explicit Runge-Kutta methods; in order for a method to be explicit, it is necessary that in the formula above defining $k_i$, $k_i$ does not appear on the right hand side. In other words, these methods have to satisfy $a_{ii}=0, i=1,\ldots,s$.
2. Embedded Runge-Kutta methods.
3. Implicit Runge-Kutta methods.

    Many well known time stepping schemes that one does not typically associate with the names Runge or Kutta can in fact be written in a way so that they, too, can be expressed in these categories. They oftentimes represent the lowest-order members of these families; one example is the simple explicit Euler method.

    Explicit Runge-Kutta methods

These methods only require a function to evaluate $M^{-1}f(t,y)$ but not (as implicit methods do) to solve an equation that involves $f(t,y)$ for $y$. Like all explicit time stepping methods, they become unstable when the time step chosen is too large.

    Well known methods in this class include forward Euler, third order Runge-Kutta, and fourth order Runge-Kutta (often abbreviated as RK4).

    Embedded Runge-Kutta methods

These methods use both a lower and a higher order method to estimate the error and decide if the time step needs to be shortened or can be increased. The term "embedded" refers to the fact that the lower-order method does not require additional evaluations of the function $M^{-1}f(\cdot,\cdot)$ but reuses data that has to be computed for the high order method anyway. It is, in other words, essentially free, and we get the error estimate as a side product of using the higher order method.

This class of methods includes Heun-Euler, Bogacki-Shampine, Dormand-Prince (ode45 in Matlab and often abbreviated as RK45 to indicate that the lower and higher order methods used here are 4th and 5th order Runge-Kutta methods, respectively), Fehlberg, and Cash-Karp.

    At the time of the writing, only embedded explicit methods have been implemented.

    Implicit Runge-Kutta methods

Implicit methods require the solution of (possibly nonlinear) systems of the form $\alpha y = f(t,y)$ for $y$ in each (sub-)timestep. Internally, this is done using a Newton-type method and, consequently, they require that the user provide functions that can evaluate $M^{-1}f(t,y)$ and $\left(I-\tau M^{-1} \frac{\partial f}{\partial y}\right)^{-1}$ or equivalently $\left(M - \tau \frac{\partial f}{\partial y}\right)^{-1} M$.

    The particular form of this operator results from the fact that each Newton step requires the solution of an equation of the form

\begin{align*}
   \left(M - \tau \frac{\partial f}{\partial y}\right) \Delta y = -h(t,y)
\end{align*}

for some (given) $h(t,y)$. Implicit methods are always stable, regardless of the time step size, but too large time steps of course affect the accuracy of the solution, even if the numerical solution remains stable and bounded.

Methods in this class include backward Euler, implicit midpoint, Crank-Nicolson, and the two stage SDIRK method (short for "singly diagonally implicit Runge-Kutta", a term coined to indicate that the diagonal elements $a_{ii}$ defining the time stepping method are all equal; this property allows for the Newton matrix $I-\tau M^{-1}\frac{\partial f}{\partial y}$ to be re-used between stages because $\tau$ is the same every time).
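In terms of the TimeStepping interface, using one of these methods requires supplying both callbacks just discussed; a hedged sketch with placeholder names, where the two lambdas correspond to the evaluate_diffusion() and id_minus_tau_J_inverse() functions that appear later in the program:

  // One implicit step: the first callback evaluates M^{-1} f(t,y), the
  // second applies (I - tau M^{-1} df/dy)^{-1} to a vector, as required by
  // the internal Newton iteration.
  TimeStepping::ImplicitRungeKutta<Vector<double>> implicit_rk(
    TimeStepping::BACKWARD_EULER);
  time = implicit_rk.evolve_one_time_step(
    [&](const double t, const Vector<double> &y) {
      return evaluate_diffusion(t, y);           // M^{-1} f(t,y)
    },
    [&](const double t, const double tau, const Vector<double> &y) {
      return id_minus_tau_J_inverse(t, tau, y);  // (I - tau M^{-1} J)^{-1} y
    },
    time, time_step, solution);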

    Spatially discrete formulation

By expanding the solution of our model problem as always using shape functions $\psi_j$ and writing

\begin{eqnarray*}
 \phi_h(x,t) = \sum_j U_j(t) \psi_j(x),
\end{eqnarray*}

     

    The Diffusion class

The next piece is the declaration of the main class. Most of the functions in this class are not new and have been explained in previous tutorials. The only interesting functions are evaluate_diffusion() and id_minus_tau_J_inverse(). evaluate_diffusion() evaluates the diffusion equation, $M^{-1}(f(t,y))$, at a given time and a given $y$. id_minus_tau_J_inverse() evaluates $\left(I-\tau M^{-1} \frac{\partial f(t,y)}{\partial y}\right)^{-1}$ or equivalently $\left(M-\tau \frac{\partial f}{\partial y}\right)^{-1} M$ at a given time, for a given $\tau$ and $y$. This function is needed when an implicit method is used.

      class Diffusion
      {
      public:

    Diffusion::evaluate_diffusion

Next, we evaluate the weak form of the diffusion equation at a given time $t$ and for a given vector $y$. In other words, as outlined in the introduction, we evaluate $M^{-1}(-{\cal D}y - {\cal A}y + {\cal S})$. For this, we have to apply the matrix $-{\cal D} - {\cal A}$ (previously computed and stored in the variable system_matrix) to $y$ and then add the source term which we integrate as we usually do. (Integrating up the solution could be done using VectorTools::create_right_hand_side() if you wanted to save a few lines of code, or wanted to take advantage of doing the integration in parallel.) The result is then multiplied by $M^{-1}$.

      Vector<double> Diffusion::evaluate_diffusion(const double time,
      const Vector<double> &y) const
      {
     
      assemble_system();
     
Finally, we solve the diffusion problem using several of the Runge-Kutta methods implemented in namespace TimeStepping, each time outputting the error at the end time. (As explained in the introduction, since the exact solution is zero at the final time, the error equals the numerical solution and can be computed by just taking the $l_2$ norm of the solution vector.)

      unsigned int n_steps = 0;
      const unsigned int n_time_steps = 200;
      const double initial_time = 0.;
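
To make the time loop concrete, here is a minimal sketch (not the tutorial's literal code; explicit_method, final_time, and solution are illustrative names, and the loop is assumed to run inside the Diffusion class) of how one of the explicit methods in namespace TimeStepping is driven, using the evaluate_diffusion() function declared above:

  TimeStepping::ExplicitRungeKutta<Vector<double>> explicit_method(
    TimeStepping::RK_CLASSIC_FOURTH_ORDER);
  const double final_time = 10.;
  const double delta_t    = (final_time - initial_time) / n_time_steps;
  double       time       = initial_time;
  while (time < final_time)
    {
      // evolve_one_time_step() calls the right-hand-side function we wrote
      // above and returns the new time after the step.
      time = explicit_method.evolve_one_time_step(
        [this](const double t, const Vector<double> &y) {
          return this->evaluate_diffusion(t, y);
        },
        time, delta_t, solution);
      ++n_steps;
    }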
/usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html 2024-11-15 06:44:31.155686993 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_53.html 2024-11-15 06:44:31.155686993 +0000

    To illustrate how one describes geometries using charts in deal.II, we will consider a case that originates in an application of the ASPECT mantle convection code, using a data set provided by D. Sarah Stamps. In the concrete application, we were interested in describing flow in the Earth mantle under the East African Rift, a zone where two continental plates drift apart. Not to beat around the bush, the geometry we want to describe looks like this:

    In particular, though you cannot see this here, the top surface is not just colored by the elevation but is, in fact, deformed to follow the correct topography. While the actual application is not relevant here, the geometry is. The domain we are interested in is a part of the Earth that ranges from the surface to a depth of 500km, from 26 to 35 degrees East of the Greenwich meridian, and from 5 degrees North of the equator to 10 degrees South.

This description of the geometry suggests to start with a box $\hat U=[26,35]\times[-10,5]\times[-500000,0]$ (measured in degrees, degrees, and meters) and to provide a map $\varphi$ so that $\varphi^{-1}(\hat U)=\Omega$ where $\Omega$ is the domain we seek. $(\Omega,\varphi)$ is then a chart, $\varphi$ the pull-back operator, and $\varphi^{-1}$ the push-forward operator. If we need a point $q$ that is the "average" of other points $q_i\in\Omega$, the ChartManifold class then first applies the pull-back to obtain $\hat q_i=\varphi(q_i)$, averages these to a point $\hat p$ and then computes $p=\varphi^{-1}(\hat p)$.

Our goal here is therefore to implement a class that describes $\varphi$ and $\varphi^{-1}$. If Earth were a sphere, then this would not be difficult: if we denote by $(\hat \phi,\hat \theta,\hat d)$ the points of $\hat U$ (i.e., longitude counted eastward, latitude counted northward, and elevation relative to zero depth), then

    \[
   \mathbf x = \varphi^{-1}(\hat \phi,\hat \theta,\hat d)
   = (R+\hat d) (\cos\hat \phi\cos\hat \theta, \sin\hat \phi\cos\hat \theta, \sin\hat \theta)^T
 \]

provides coordinates in a Cartesian coordinate system, where $R$ is the radius of the sphere. However, the Earth is not a sphere:

    1. It is flattened at the poles and larger at the equator: the semi-major axis is approximately 22km longer than the semi-minor axis. We will account for this using the WGS 84 reference standard for the Earth shape. The formula used in WGS 84 to obtain a position in Cartesian coordinates from longitude, latitude, and elevation is


      Computing $\varphi_\text{WGS84}(\mathbf x)$ is also possible though a lot more awkward. We won't show the formula here but instead only provide the implementation in the program.

      Implementation

There are a number of issues we need to address in the program. At the largest scale, we need to write a class that implements the interface of ChartManifold. This involves a function push_forward() that takes a point in the reference domain $\hat U$ and transforms it into real space using the function $\varphi^{-1}$ outlined above, and its inverse function pull_back() implementing $\varphi$. We will do so in the AfricaGeometry class below that looks, in essence, like this:

  class AfricaGeometry : public ChartManifold<3,3>
  {
  public:
    virtual Point<3> pull_back(const Point<3> &space_point) const override;
    virtual Point<3> push_forward(const Point<3> &chart_point) const override;
  };
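
For intuition, if one used the purely spherical formula above instead of WGS 84, the push-forward would only be a few lines. The following sketch (not the program's actual code; it assumes deal.II's Point class and <cmath>, and the value of $R$ is the mean Earth radius, an assumption here) converts (longitude, latitude, elevation), given in degrees, degrees, and meters, to Cartesian coordinates:

  // Spherical-only sketch of the push-forward; the real class uses the
  // WGS 84 formulas plus the topography offset instead.
  Point<3> push_forward_sphere(const Point<3> &phat)
  {
    const double R     = 6371000.;                       // mean radius [m], assumed
    const double phi   = phat[0] * numbers::PI / 180.;   // longitude in radians
    const double theta = phat[1] * numbers::PI / 180.;   // latitude in radians
    const double d     = phat[2];                        // elevation (non-positive here)
    return Point<3>((R + d) * std::cos(phi) * std::cos(theta),
                    (R + d) * std::sin(phi) * std::cos(theta),
                    (R + d) * std::sin(theta));
  }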
      -11.983333 35.966667 687
      -11.983333 35.983333 659

      The data is formatted as latitude longitude elevation where the first two columns are provided in degrees North of the equator and degrees East of the Greenwich meridian. The final column is given in meters above the WGS 84 zero elevation.
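
For illustration, reading such whitespace-separated triples is a short loop; this sketch is not the tutorial's code (the struct, function, and storage are hypothetical, and the real data file is compressed):

  #include <fstream>
  #include <string>
  #include <vector>

  struct TopoSample
  {
    double latitude, longitude, elevation;
  };

  std::vector<TopoSample> read_topography(const std::string &filename)
  {
    std::vector<TopoSample> samples;
    std::ifstream           in(filename);
    TopoSample              s;
    // Each line holds "latitude longitude elevation" as plain numbers.
    while (in >> s.latitude >> s.longitude >> s.elevation)
      samples.push_back(s);
    return samples;
  }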

In the transformation functions, we need to evaluate $h(\hat\phi,\hat\theta)$ for a given longitude $\hat\phi$ and latitude $\hat\theta$. In general, this data point will not be available and we will have to interpolate between adjacent data points. Writing such an interpolation routine is not particularly difficult, but it is a bit tedious and error prone. Fortunately, we can somehow shoehorn this data set into an existing class: Functions::InterpolatedUniformGridData . Unfortunately, the class does not fit the bill quite exactly and so we need to work around it a bit. The problem comes from the way we initialize this class: in its simplest form, it takes a stream of values that it assumes form an equispaced mesh in the $x-y$ plane (or, here, the $\phi-\theta$ plane). Which is what they do here, sort of: they are ordered latitude first, longitude second; and more awkwardly, the first column starts at the largest values and counts down, rather than the usual other way around.
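
A sketch of that shoehorning, with hypothetical names and the extents passed in from outside rather than hardcoded (the constructor arguments are the class's interval endpoints, subinterval counts, and table of values):

  #include <deal.II/base/function_lib.h>
  #include <deal.II/base/table.h>
  #include <array>
  #include <utility>

  using namespace dealii;

  Functions::InterpolatedUniformGridData<2>
  make_topography(const Table<2, double>          &elevations,
                  const std::pair<double, double> &lat_range,
                  const std::pair<double, double> &lon_range)
  {
    const std::array<std::pair<double, double>, 2> extents{
      {lat_range, lon_range}};
    // One fewer subinterval than data points in each coordinate direction:
    const std::array<unsigned int, 2> n_subintervals{
      {static_cast<unsigned int>(elevations.size(0) - 1),
       static_cast<unsigned int>(elevations.size(1) - 1)}};
    return Functions::InterpolatedUniformGridData<2>(extents,
                                                     n_subintervals,
                                                     elevations);
  }

One would then query the elevation at a chart point with value(), keeping the coordinate order of the table (latitude first, as just described).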

      Now, while tutorial programs are meant to illustrate how to code with deal.II, they do not necessarily have to satisfy the same quality standards as one would have to do with production codes. In a production code, we would write a function that reads the data and (i) automatically determines the extents of the first and second column, (ii) automatically determines the number of data points in each direction, (iii) does the interpolation regardless of the order in which data is arranged, if necessary by switching the order between reading and presenting it to the Functions::InterpolatedUniformGridData class.

      On the other hand, tutorial programs are best if they are short and demonstrate key points rather than dwell on unimportant aspects and, thereby, obscure what we really want to show. Consequently, we will allow ourselves a bit of leeway:

      • since this program is intended solely for a particular geometry around the area of the East-African rift and since this is precisely the area described by the data file, we will hardcode in the program that there are $1139\times 660$ pieces of data;
  • Does it matter? It is almost certainly true that this depends on the equation you are solving. For example, it is known that solving the Euler equations of gas dynamics on complex geometries requires highly accurate boundary descriptions to ensure convergence of quantities that measure the flow close to the boundary. On the other hand, equations with elliptic components (e.g., the Laplace or Stokes equations) are typically rather forgiving of these issues: one does quadrature anyway to approximate integrals, and further approximating the geometry may not do as much harm as one could fear given that the volume of the overlaps or gaps at every hanging node is only ${\cal O}(h^d)$ even with a linear mapping and ${\cal O}(h^{d+p-1})$ for a mapping of degree $p$. (You can see this by considering that in 2d the gap/overlap is a triangle with base $h$ and height ${\cal O}(h)$; in 3d, it is a pyramid-like structure with base area $h^2$ and height ${\cal O}(h)$. Similar considerations apply for higher order mappings where the height of the gaps/overlaps is ${\cal O}(h^p)$.) In other words, if you use a linear mapping with linear elements, the error in the volume you integrate over is already at the same level as the integration error using the usual Gauss quadrature. Of course, for higher order elements one would have to choose matching mapping objects.

          Another point of view on why it is probably not worth worrying too much about the issue is that there is certainly no narrative in the community of numerical analysts that these issues are a major concern one needs to watch out for when using complex geometries. If it does not seem to be discussed often among practitioners, if ever at all, then it is at least not something people have identified as a common problem.

          This issue is not dissimilar to having hanging nodes at curved boundaries where the geometry description of the boundary typically pulls a hanging node onto the boundary whereas the large edge remains straight, making the adjacent small and large cells not match each other. Although this behavior existed in deal.II since its beginning, 15 years before manifold descriptions became available, it did not ever come up in mailing list discussions or conversations with colleagues.

/usr/share/doc/packages/dealii/doxygen/deal.II/step_55.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_55.html 2024-11-15 06:44:31.211687493 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_55.html 2024-11-15 06:44:31.211687493 +0000
        • You can implement various other tasks for parallel programs: error computation, writing graphical output, etc.
        • You can visualize vector fields, stream lines, and contours of vector quantities.
We are solving for a velocity $\textbf{u}$ and pressure $p$ that satisfy the Stokes equation, which reads

        \begin{eqnarray*}
   - \triangle \textbf{u} + \nabla p &=& \textbf{f}, \\
   -\textrm{div}\; \textbf{u} &=& 0.
\end{eqnarray*}

        Optimal preconditioners

Make sure that you read (even better: try) what is described in "Block Schur complement preconditioner" in the "Possible Extensions" section in step-22. As described there, we are going to solve the block system using a Krylov method and a block preconditioner.

Our goal here is to construct a very simple (maybe the simplest?) optimal preconditioner for the linear system. A preconditioner is called "optimal" or "of optimal complexity", if the number of iterations of the preconditioned system is independent of the mesh size $h$. You can extend that definition to also require independence of the number of processors used (we will discuss that in the results section), the computational domain and the mesh quality, the test case itself, the polynomial degree of the finite element space, and more.

Why is a constant number of iterations considered to be "optimal"? Assume the discretized PDE gives a linear system with N unknowns. Because the matrix coming from the FEM discretization is sparse, a matrix-vector product can be done in O(N) time. A preconditioner application can also only be O(N) at best (for example doable with multigrid methods). If the number of iterations required to solve the linear system is independent of $h$ (and therefore N), the total cost of solving the system will be O(N). It is not possible to beat this complexity, because even looking at all the entries of the right-hand side already takes O(N) time. For more information see [elman2005], Chapter 2.5 (Multigrid).
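
In symbols, the argument is simply

\[
  \text{total cost}
  = \underbrace{n_\text{iter}}_{O(1)}
    \cdot \Bigl(\underbrace{\text{matrix-vector product}}_{O(N)}
    + \underbrace{\text{preconditioner application}}_{O(N)}\Bigr)
  = O(N).
\]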

The preconditioner described here is even simpler than the one described in step-22 and will typically require more iterations and consequently time to solve. When considering preconditioners, optimality is not the only important metric. But an optimal and expensive preconditioner is typically more desirable than a cheaper, non-optimal one, because as the mesh size becomes smaller and smaller and linear problems become bigger and bigger, the former will eventually beat the latter.

        The solver and preconditioner

        We precondition the linear system

\begin{eqnarray*}
 \left(\begin{array}{cc} A & B^T \\ B & 0 \end{array}\right)
 \qquad\text{with the block-diagonal preconditioner}\qquad
 P^{-1} = \left(\begin{array}{cc} A & 0 \\ 0 & S \end{array}\right)^{-1},
\end{eqnarray*}

        where $S=-BA^{-1} B^T$ is the Schur complement.

With this choice of $P$, assuming that we handle $A^{-1}$ and $S^{-1}$ exactly (which is an "idealized" situation), the preconditioned linear system has three distinct eigenvalues independent of $h$ and is therefore "optimal". See section 6.2.1 (especially p. 292) in [elman2005]. For comparison, using the ideal version of the upper block-triangular preconditioner in step-22 (also used in step-56) would have all eigenvalues be equal to one.

We will use approximations of the inverse operations in $P^{-1}$ that are (nearly) independent of $h$. In this situation, one can again show that the eigenvalues are independent of $h$. For the Krylov method we choose MINRES, which is attractive for the analysis (iteration count is proven to be independent of $h$, see the remainder of chapter 6.2.1 in [elman2005]), great from the computational standpoint (simpler and cheaper than GMRES for example), and applicable (matrix and preconditioner are symmetric).

For the approximations we will use a CG solve with the mass matrix in the pressure space for approximating the action of $S^{-1}$. Note that the mass matrix is spectrally equivalent to $S$. We can expect the number of CG iterations to be independent of $h$, even with a simple preconditioner like ILU.

For the approximation of the velocity block $A$ we will perform a single AMG V-cycle. In practice this choice is not exactly independent of $h$, which can explain the slight increase in iteration numbers. A possible explanation is that the coarsest level will be solved exactly and the number of levels and the size of the coarsest matrix are not predictable.
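
Putting these pieces together, the block-diagonal preconditioner can be written as a small wrapper class. The following is a sketch in the spirit of the discussion above, not the tutorial's exact code (class and member names are illustrative); preconditioner_A would be the AMG V-cycle and preconditioner_S the CG solve with the pressure mass matrix:

  #include <deal.II/lac/block_vector.h>

  using namespace dealii;

  template <class PreconditionerA, class PreconditionerS>
  class BlockDiagonalPreconditioner
  {
  public:
    BlockDiagonalPreconditioner(const PreconditionerA &preconditioner_A,
                                const PreconditionerS &preconditioner_S)
      : preconditioner_A(preconditioner_A)
      , preconditioner_S(preconditioner_S)
    {}

    void vmult(BlockVector<double> &dst, const BlockVector<double> &src) const
    {
      // Apply an approximation of A^{-1} to the velocity block and of
      // S^{-1} to the pressure block, independently of each other.
      preconditioner_A.vmult(dst.block(0), src.block(0));
      preconditioner_S.vmult(dst.block(1), src.block(1));
    }

  private:
    const PreconditionerA &preconditioner_A;
    const PreconditionerS &preconditioner_S;
  };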

        The testcase

        We will construct a manufactured solution based on the classical Kovasznay problem, see [kovasznay1948laminar]. Here is an image of the solution colored by the x velocity including streamlines of the velocity:

We have to cheat here, though, because we are not solving the non-linear Navier-Stokes equations, but the linear Stokes system without convective term. Therefore, to recreate the exact same solution, we use the method of manufactured solutions with the solution of the Kovasznay problem. This will effectively move the convective term into the right-hand side $f$.

        The right-hand side is computed using the script "reference.py" and we use the exact solution for boundary conditions and error computation.

        The commented program

          #include <deal.II/base/quadrature_lib.h>

        Results

As expected from the discussion above, the number of iterations is independent of the number of processors and only very slightly dependent on $h$:

/usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html 2024-11-15 06:44:31.287688172 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_56.html 2024-11-15 06:44:31.291688207 +0000
    Note
    If you use this program as a basis for your own work, please consider citing it in your list of references. The initial version of this work was contributed to the deal.II project by the authors listed in the following citation: 10.5281/zenodo.400995

    Introduction

    Stokes Problem

The purpose of this tutorial is to create an efficient linear solver for the Stokes equation and compare it to alternative approaches. Here, we will use FGMRES with geometric multigrid as a preconditioner for the velocity block, and we will show in the results section that this is a fundamentally better approach than the linear solvers used in step-22 (including the scheme described in "Possible Extensions"). Fundamentally, this is because only with multigrid is it possible to get $O(n)$ solve time, where $n$ is the number of unknowns of the linear system. Using the Timer class, we collect some statistics to compare setup times, solve times, and number of iterations. We also compute errors to make sure that what we have implemented is correct.

Let $u \in H_0^1 = \{ u \in H^1(\Omega), u|_{\partial \Omega} = 0 \}$ and $p \in L_*^2 = \{ p \in L^2(\Omega), \int_\Omega p = 0 \}$. The Stokes equations read as follows in non-dimensionalized form:

\begin{eqnarray*}
  - \triangle \textbf{u} + \nabla p &=& \textbf{f}, \\
  - \nabla \cdot \textbf{u} &=& 0,
\end{eqnarray*}

which, after discretization, leads to the block linear system

\begin{eqnarray*}
 \left(\begin{array}{cc} A & B^T \\ B & 0 \end{array}\right)
 \left(\begin{array}{c} U \\ P \end{array}\right)
 =
 \left(\begin{array}{c} F \\ 0 \end{array}\right).
\end{eqnarray*}

Our goal is to compare several solution approaches. While step-22 solves the linear system using a "Schur complement approach" in two separate steps, we instead attack the block system at once using FGMRES with an efficient preconditioner, in the spirit of the approach outlined in the "Results" section of step-22. The idea is as follows: if we find a block preconditioner $P$ such that the matrix

\begin{eqnarray*}
 \left(\begin{array}{cc} A & B^T \\ B & 0 \end{array}\right) P^{-1}
\end{eqnarray*}

is simple, then an iterative solver with that preconditioner will converge in a few iterations. Using the Schur complement $S=BA^{-1}B^T$, a natural choice is the block triangular preconditioner

\begin{eqnarray*}
 P^{-1}
 =
 \left(\begin{array}{cc} A & B^T \\ 0 & S \end{array}\right)^{-1}
 =
 \left(\begin{array}{cc} A^{-1} & 0 \\ 0 & I \end{array}\right)
 \left(\begin{array}{cc} I & -B^T \\ 0 & I \end{array}\right)
 \left(\begin{array}{cc} I & 0 \\ 0 & S^{-1} \end{array}\right)
 \approx
 \left(\begin{array}{cc} \widetilde{A^{-1}} & 0 \\ 0 & I \end{array}\right)
 \left(\begin{array}{cc} I & -B^T \\ 0 & I \end{array}\right)
 \left(\begin{array}{cc} I & 0 \\ 0 & \widetilde{S^{-1}} \end{array}\right).
\end{eqnarray*}

Since $P$ is aimed to be a preconditioner only, we shall use the approximations on the right in the equation above.

As discussed in step-22, $-M_p^{-1}=:\widetilde{S^{-1}} \approx S^{-1}$, where $M_p$ is the pressure mass matrix and is solved approximately by using CG with ILU as a preconditioner, and $\widetilde{A^{-1}}$ is obtained by one of multiple methods: solving a linear system with CG and ILU as preconditioner, just using one application of an ILU, solving a linear system with CG and GMG (Geometric Multigrid as described in step-16) as a preconditioner, or just performing a single V-cycle of GMG.

For comparison, we also use the direct solver UMFPACK on the whole system instead of FGMRES. If you want to use a direct solver (like UMFPACK), the system needs to be invertible. To avoid the one dimensional null space given by the constant pressures, we fix the first pressure unknown to zero. This is not necessary for the iterative solvers.

    Reference Solution

The test problem is a "Manufactured Solution" (see step-7 for details), and we choose $u=(u_1,u_2,u_3)=(2\sin (\pi x), - \pi y \cos(\pi x), - \pi z \cos(\pi x))$, which is divergence-free: $\nabla\cdot u = 2\pi\cos(\pi x) - \pi\cos(\pi x) - \pi\cos(\pi x) = 0$.

  return return_value;
  }
     
Implementation of $f$. See the introduction for more information.

  template <int dim>
  class RightHandSide : public Function<dim>
  {
  public:
    virtual double value(const Point<dim> &p, const unsigned int component = 0) const override;
  };

    Results

    Errors

We first run the code and confirm that the finite element solution converges with the correct rates as predicted by the error analysis of mixed finite element problems. Given sufficiently smooth exact solutions $u$ and $p$, the errors of the Taylor-Hood element $Q_k \times Q_{k-1}$ should be

    \[
 \| u -u_h \|_0 + h ( \| u- u_h\|_1 + \|p - p_h \|_0)
 \leq C h^{k+1} ( \|u \|_{k+1} + \| p \|_k )
 \]

see for example Ern/Guermond "Theory and Practice of Finite Elements", Section 4.2.5 p195. This is indeed what we observe, using the $Q_2 \times Q_1$ element as an example (this is what is done in the code, but is easily changed in main()):

(Table: results with PETSc for different numbers of processors.)

The introduction also outlined another option to precondition the overall system, namely one in which we do not choose $\widetilde{A^{-1}}=A^{-1}$ as in the table above, but in which $\widetilde{A^{-1}}$ is only a single preconditioner application with GMG or ILU, respectively.

    This is in fact implemented in the code: Currently, the boolean use_expensive in solve() is set to true. The option mentioned above is obtained by setting it to false.

What you will find is that the number of FGMRES iterations stays constant under refinement if you use GMG this way. This means that the Multigrid is optimal and independent of $h$.

    The plain program

    /* ------------------------------------------------------------------------
    *
/usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html 2024-11-15 06:44:31.355688779 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_57.html 2024-11-15 06:44:31.355688779 +0000

    Introduction

    Navier Stokes Equations

In this tutorial we show how to solve the incompressible Navier Stokes equations (NSE) with Newton's method. The flow we consider here is assumed to be steady. In a domain $\Omega \subset \mathbb{R}^{d}$, $d=2,3$, with a piecewise smooth boundary $\partial \Omega$, and a given force field $\textbf{f}$, we seek a velocity field $\textbf{u}$ and a pressure field $\textbf{p}$ satisfying

    \begin{eqnarray*}
 - \nu \Delta\textbf{u} + (\textbf{u} \cdot \nabla)\textbf{u} + \nabla p &=& \textbf{f}\\
 - \nabla \cdot \textbf{u} &=& 0.
\end{eqnarray*}

Now, Newton's iteration can be used to solve for the update terms:

 • Initialization: Initial guess $u_0$ and $p_0$, tolerance $\tau$;

  • Linear solve to compute update term $\delta\textbf{u}^{k}$ and $\delta p^k$;

    Finding an Initial Guess

    The initial guess needs to be close enough to the solution for Newton's method to converge; hence, finding a good starting value is crucial to the nonlinear solver.

When the viscosity $\nu$ is large, a good initial guess can be obtained by solving the Stokes equation with viscosity $\nu$. While problem dependent, this works for $\nu \geq 1/400$ for the test problem considered here.

However, the convective term $(\mathbf{u}\cdot\nabla)\mathbf{u}$ will be dominant if the viscosity is small, like $1/7500$ in test case 2. In this situation, we use a continuation method to set up a series of auxiliary NSEs with viscosity approaching the one in the target NSE. Correspondingly, we create a sequence $\{\nu_{i}\}$ with $\nu_{n}= \nu$, and accept that the solutions to two NSE with viscosity $\nu_{i}$ and $\nu_{i+1}$ are close if $|\nu_{i} - \nu_{i+1}|$ is small. Then we use the solution to the NSE with viscosity $\nu_{i}$ as the initial guess of the NSE with $\nu_{i+1}$. This can be thought of as a staircase from the Stokes equations to the NSE we want to solve.

    That is, we first solve a Stokes problem


with a parameter $\gamma$ and an invertible matrix $W$. Here $\gamma B^TW^{-1}B$ is the Augmented Lagrangian term; see [Benzi2006] for details.

Denoting the system matrix of the new system by $G$ and the right-hand side by $b$, we solve it iteratively with right preconditioning $P^{-1}$ as $GP^{-1}y = b$, where

    \begin{eqnarray*}
 P^{-1} =
   \begin{pmatrix}
    \tilde{A} & B^T \\
    0         & \tilde{S}
   \end{pmatrix}^{-1}
 \end{eqnarray*}

with $\tilde{A} = A + \gamma B^TW^{-1}B$ and where $\tilde{S} = B \tilde{A}^{-1} B^T$ is the corresponding Schur complement. We let $W = M_p$ where $M_p$ is the pressure mass matrix, then $\tilde{S}^{-1}$ can be approximated by

    \begin{eqnarray*}
 \tilde{S}^{-1} \approx -(\nu+\gamma)M_p^{-1}.
 \end{eqnarray*}

    See [Benzi2006] for details.

We decompose $P^{-1}$ as

\begin{eqnarray*}
 P^{-1} =
   \begin{pmatrix}
     \tilde{A}^{-1} & 0 \\
     0              & I
   \end{pmatrix}
   \begin{pmatrix}
     I & -B^T \\
     0 & I
   \end{pmatrix}
   \begin{pmatrix}
     I & 0 \\
     0 & \tilde{S}^{-1}
   \end{pmatrix}.
\end{eqnarray*}

Here two inexact solvers will be needed for $\tilde{A}^{-1}$ and $\tilde{S}^{-1}$, respectively (see [Benzi2006]). Since the pressure mass matrix is symmetric and positive definite, CG with ILU as a preconditioner is appropriate to use for $\tilde{S}^{-1}$. For simplicity, we use the direct solver UMFPACK for $\tilde{A}^{-1}$. The last ingredient is a sparse matrix-vector product with $B^T$. Instead of computing the matrix product in the augmented Lagrangian term in $\tilde{A}$, we assemble Grad-Div stabilization $(\nabla \cdot \phi _{i}, \nabla \cdot \phi _{j}) \approx (B^T M_p^{-1}B)_{ij}$, as explained in [HeisterRapin2013].
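
The factorization above translates directly into a preconditioner application. The following is a hedged sketch, not step-57's actual code (class and member names are illustrative): InverseA stands for the UMFPACK solve with $\tilde{A}$, InverseS for the CG-with-ILU approximation of $\tilde{S}^{-1}$, and MatrixBT for the product with $B^T$:

  #include <deal.II/lac/block_vector.h>
  #include <deal.II/lac/vector.h>

  using namespace dealii;

  template <class InverseA, class InverseS, class MatrixBT>
  class BlockTriangularPreconditioner
  {
  public:
    BlockTriangularPreconditioner(const InverseA &a_inv,
                                  const InverseS &s_inv,
                                  const MatrixBT &b_t)
      : a_inv(a_inv)
      , s_inv(s_inv)
      , b_t(b_t)
    {}

    void vmult(BlockVector<double> &dst, const BlockVector<double> &src) const
    {
      // Rightmost factor: pressure block, dst_p = ~S^{-1} src_p.
      s_inv.vmult(dst.block(1), src.block(1));
      // Middle and left factors: velocity block,
      // dst_u = ~A^{-1} (src_u - B^T dst_p).
      Vector<double> tmp(src.block(0).size());
      b_t.vmult(tmp, dst.block(1));
      tmp.sadd(-1.0, src.block(0)); // tmp = src_u - B^T dst_p
      a_inv.vmult(dst.block(0), tmp);
    }

  private:
    const InverseA &a_inv;
    const InverseS &s_inv;
    const MatrixBT &b_t;
  };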

    Test Case

    We use the lid driven cavity flow as our test case; see this page for details. The computational domain is the unit square and the right-hand side is $f=0$. The boundary condition is

\begin{eqnarray*}
  \textbf{u}(x,y) = (1,0) \quad \text{on } y=1, \qquad
  \textbf{u} = (0,0) \quad \text{on the other three sides of the square.}
\end{eqnarray*}

    When solving this problem, the error consists of the nonlinear error (from Newton's iteration) and the discretization error (dependent on mesh size). The nonlinear part decreases with each Newton iteration and the discretization error reduces with mesh refinement. In this example, the solution from the coarse mesh is transferred to successively finer meshes and used as an initial guess. Therefore, the nonlinear error is always brought below the tolerance of Newton's iteration and the discretization error is reduced with each mesh refinement.

Inside the loop, we involve three solvers: one for $\tilde{A}^{-1}$, one for $M_p^{-1}$ and one for $Gx=b$. The first two solvers are invoked in the preconditioner and the outer solver gives us the update term. Overall convergence is controlled by the nonlinear residual; as Newton's method does not require an exact Jacobian, we employ FGMRES with a relative tolerance of only 1e-4 for the outer linear solver. In fact, we use a truncated Newton solve for this system. As described in step-22, the inner linear solves are also not required to be done very accurately. Here we use CG with a relative tolerance of 1e-6 for the pressure mass matrix. As expected, we still see convergence of the nonlinear residual down to 1e-14. Also, we use a simple line search algorithm for globalization of the Newton method.

    The cavity reference values for $\mathrm{Re}=400$ and $\mathrm{Re}=7500$ are from [Ghia1982] and [Erturk2005], respectively, where $\mathrm{Re}$ is the Reynolds number. Here the viscosity is defined by $1/\mathrm{Re}$. Even though we can still find a solution for $\mathrm{Re}=10000$ and the papers cited throughout this introduction contain results for comparison, we limit our discussion here to $\mathrm{Re}=7500$. This is because the solution is no longer stationary starting around $\mathrm{Re}=8000$ but instead becomes periodic, see [Bruneau2006] for details.

    The commented program

    Include files

  • If we were asked to assemble the Newton matrix, then we also built a pressure mass matrix in the bottom right block of the matrix. We only need this for the preconditioner, so we need to copy it into a separate matrix object, followed by zeroing out this block in the Newton matrix.

Note that setting this bottom right block to zero is not identical to not assembling anything in this block, because applying boundary values and hanging node constraints (in the constraints_used.distribute_local_to_global() call above) puts entries into this block. As a consequence, setting the $(1,1)$ block to zero below does not result in what would have happened if we had just not assembled a pressure mass matrix in that block to begin with.

The difference is that if we had not assembled anything in this block, dealing with constraint degrees of freedom would have put entries on the diagonal of the $(1,1)$ block whereas the last operation below, zeroing out the entire block, results in a system matrix with rows and columns that are completely empty. In other words, the linear problem is singular. Luckily, however, the FGMRES solver we use appears to handle these rows and columns without any problem.

      if (assemble_matrix)
      {
      pressure_mass_matrix.reinit(sparsity_pattern.block(1, 1));
/usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html 2024-11-15 06:44:31.415689315 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_58.html 2024-11-15 06:44:31.415689315 +0000

Not surprisingly, the factor $i$ in front of the time derivative couples the real and imaginary parts of the equation. If we want to understand this equation further, take the time derivative of one of the equations, say

    \begin{align*}
   \frac{\partial^2 w}{\partial t^2}
   - \frac 12 \Delta \frac{\partial v}{\partial t}
   &= 0.
 \end{align*}

This equation is hyperbolic and similar in character to the wave equation. (This will also be obvious if you look at the video in the "Results" section of this program.) Furthermore, we could have arrived at the same equation for $v$ as well. Consequently, a better assumption for the NLSE is to think of it as a hyperbolic, wave-propagation equation than as a diffusion equation such as the heat equation. (You may wonder whether it is correct that the operator $\Delta^2$ appears with a positive sign whereas in the wave equation, $\Delta$ has a negative sign. This is indeed correct: After multiplying by a test function and integrating by parts, we want to come out with a positive (semi-)definite form. So, from $-\Delta u$ we obtain $+(\nabla v,\nabla u)$. Likewise, after integrating by parts twice, we obtain from $+\Delta^2 u$ the form $+(\Delta v,\Delta u)$. In both cases we get the desired positive sign.)

    The real NLSE, of course, also has the terms $V\psi$ and $\kappa|\psi|^2\psi$. However, these are of lower order in the spatial derivatives, and while they are obviously important, they do not change the character of the equation.

In any case, the purpose of this discussion is to figure out what time stepping scheme might be appropriate for the equation. The conclusion is that, as a hyperbolic-kind of equation, we need to choose a time step that satisfies a CFL-type condition. If we were to use an explicit method (which we will not), we would have to investigate the eigenvalues of the matrix that corresponds to the spatial operator. If you followed the discussions of the video lectures (See also video lecture 26, video lecture 27, video lecture 28.) then you will remember that the pattern is that one needs to make sure that $k^s \propto h^t$ where $k$ is the time step, $h$ the mesh width, and $s,t$ are the orders of temporal and spatial derivatives. Whether you take the original equation ( $s=1,t=2$) or the reformulation for only the real or imaginary part, the outcome is that we would need to choose $k \propto h^2$ if we were to use an explicit time stepping method. This is not feasible for the same reasons as in step-26 for the heat equation: It would yield impractically small time steps for even only modestly refined meshes. Rather, we have to use an implicit time stepping method and can then choose a more balanced $k \propto h$. Indeed, we will use the implicit Crank-Nicolson method as we have already done in step-23 before for the regular wave equation.

    The general idea of operator splitting

    Note
    The material presented here is also discussed in video lecture 30.25. (All video lectures are also available here.)

    If one thought of the NLSE as an ordinary differential equation in which the right hand side happens to have spatial derivatives, i.e., write it as


This intuition is indeed correct, though the approximation is not exact: the difference between the exact left hand side and the term $I^{(1)}+I^{(2)}+I^{(3)}$ (i.e., the difference between the exact increment for the exact solution $\psi(t)$ when moving from $t_n$ to $t_{n+1}$, and the increment composed of the three parts on the right hand side), is proportional to $\Delta t=t_{n+1}-t_{n}$. In other words, this approach introduces an error of size ${\cal O}(\Delta t)$. Nothing we have done so far has discretized anything in time or space, so the overall error is going to be ${\cal O}(\Delta t)$ plus whatever error we commit when approximating the integrals (the temporal discretization error) plus whatever error we commit when approximating the spatial dependencies of $\psi$ (the spatial error).

Before we continue with discussions about operator splitting, let us talk about why one would even want to go this way. The answer is simple: For some of the separate equations for the $\psi^{(k)}$, we may have ways to solve them more efficiently than if we throw everything together and try to solve it at once. For example, and particularly pertinent in the current case: The equation for $\psi^{(3)}$, i.e.,

\begin{align*}
  \frac{d\psi^{(3)}}{dt}
  =
  -i\kappa \left|\psi^{(3)}\right|^2 \,\psi^{(3)},
\end{align*}

can be solved exactly: its solution only rotates the phase, $\psi^{(3)}(t) = e^{-i\kappa |\psi(t_n)|^2 (t-t_n)} \psi(t_n)$.

This is easy to see if (i) you plug this solution into the differential equation, and (ii) realize that the magnitude $|\psi^{(3)}|$ is constant, i.e., the term $|\psi(t_n)|^2$ in the exponent is in fact equal to $|\psi^{(3)}(t)|^2$. In other words, the solution of the ODE for $\psi^{(3)}(t)$ only changes its phase, but the magnitude of the complex-valued function $\psi^{(3)}(t)$ remains constant. This makes computing $I^{(3)}$ particularly convenient: we don't actually need to solve any ODE, we can write the solution down by hand. Using the operator splitting approach, none of the methods to compute $I^{(1)},I^{(2)}$ therefore have to deal with the nonlinear term and all of the associated unpleasantries: we can get away with solving only linear problems, as long as we allow ourselves the luxury of using an operator splitting approach.
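
In code, this closed-form update is just a loop over the (complex-valued) solution entries. A minimal sketch, assuming nodal values stored in a deal.II vector (kappa and half_dt are illustrative names, and applying the update nodally is itself an approximation for non-nodal elements):

  #include <deal.II/lac/vector.h>
  #include <complex>

  using namespace dealii;

  void do_phase_rotation(Vector<std::complex<double>> &psi,
                         const double                  kappa,
                         const double                  half_dt)
  {
    // psi_j <- exp(-i kappa |psi_j|^2 dt/2) psi_j, the exact solution of
    // the phase-rotation ODE over half a time step.
    for (std::complex<double> &value : psi)
      value *= std::exp(std::complex<double>(0., -1.) * kappa *
                        std::norm(value) * half_dt);
  }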

Secondly, one often uses operator splitting if the different physical effects described by the different terms have different time scales. Imagine, for example, a case where we really did have some sort of diffusion equation. Diffusion acts slowly, but if $\kappa$ is large, then the "phase rotation" by the term $-i\kappa |\psi^{(3)}(t)|^2 \,\psi^{(3)}(t)$ acts quickly. If we treated everything together, this would imply having to take rather small time steps. But with operator splitting, we can take large time steps $\Delta t=t_{n+1}-t_{n}$ for the diffusion, and (assuming we didn't have an analytic solution) use an ODE solver with many small time steps to integrate the "phase rotation" equation for $\psi^{(3)}$ from $t_n$ to $t_{n+1}$. In other words, operator splitting allows us to decouple slow and fast time scales and treat them differently, with methods adjusted to each case.

    Operator splitting: the "Lie splitting" approach

While the method above allows us to compute the three contributions $I^{(k)}$ in parallel, if we want, the method can be made slightly more accurate and easy to implement if we don't let the trajectories for the $\psi^{(k)}$ start all at $\psi(t_n)$, but instead let the trajectory for $\psi^{(2)}$ start at the end point of the trajectory for $\psi^{(1)}$, namely $\psi^{(1)}(t_{n+1})$; similarly, we will start the trajectory for $\psi^{(3)}$ at the end point of the trajectory for $\psi^{(2)}$, namely $\psi^{(2)}(t_{n+1})$. This method is then called "Lie splitting" and has the same order of error as the method above, i.e., the splitting error is ${\cal O}(\Delta t)$.


(Compare this again with the "exact" computation of $\psi(t_{n+1})$: It only differs in how we approximate $\psi(t)$ in each of the three integrals.) In other words, Lie splitting is a lot simpler to implement than the original method outlined above because data handling is so much simpler.

    Operator splitting: the "Strang splitting" approach

As mentioned above, Lie splitting is only ${\cal O}(\Delta t)$ accurate. This is acceptable if we were to use a first order time discretization, for example using the explicit or implicit Euler methods to solve the differential equations for $\psi^{(k)}$. This is because these time integration methods introduce an error proportional to $\Delta t$ themselves, and so the splitting error is proportional to an error that we would introduce anyway, and does not diminish the overall convergence order.

    But we typically want to use something higher order – say, a Crank-Nicolson or BDF2 method – since these are often not more expensive than a simple Euler method. It would be a shame if we were to use a time stepping method that is ${\cal O}(\Delta t^2)$, but then lose the accuracy again through the operator splitting.

    This is where the Strang splitting method comes in. It is easier to explain if we had only two parts, and so let us combine the effects of the Laplace operator and of the potential into one, and the phase rotation into a second effect. (Indeed, this is what we will do in the code since solving the equation with the Laplace equation with or without the potential costs the same – so we merge these two steps.) The Lie splitting method from above would then do the following: It computes solutions of the following two ODEs,

\begin{align*}
  -i \frac{\partial\psi^{(2)}}{\partial t}
  -
  \frac 12 \Delta \psi^{(2)} + V \psi^{(2)} = 0.
\end{align*}

This equation is linear. Furthermore, we only have to solve it from $t_n$ to $t_{n+1}$, i.e., for exactly one time step.

    To do this, we will apply the second order accurate Crank-Nicolson scheme that we have already used in some of the other time dependent codes (specifically: step-23 and step-26). It reads as follows:

    \begin{align*}
   -i\frac{\psi^{(n,2)}-\psi^{(n,1)}}{k_{n+1}}
  -
  \frac 12 \Delta \left[\frac 12 \left(\psi^{(n,2)}+\psi^{(n,1)}\right)\right]
  + V \left[\frac 12 \left(\psi^{(n,2)}+\psi^{(n,1)}\right)\right]
  = 0.
 \end{align*}

    Here, the "previous" solution $\psi^{(n,1)}$ (or the "initial -condition" for this part of the time step) is the output of the first phase rotation half-step; the output of the current step will be denoted by $\psi^{(n,2)}$. $k_{n+1}=t_{n+1}-t_n$ is the length of the time step. (One could argue whether $\psi^{(n,1)}$ and $\psi^{(n,1)}$ live at time step $n$ or $n+1$ and what their upper indices should be. This is a philosophical discussion without practical impact, and one might think of $\psi^{(n,1)}$ as something like $\psi^{(n+\tfrac 13)}$, and $\psi^{(n,2)}$ as $\psi^{(n+\tfrac 23)}$ if that helps clarify things – though, again $n+\frac 13$ is not to be understood as "one third time step after +condition" for this part of the time step) is the output of the first phase rotation half-step; the output of the current step will be denoted by $\psi^{(n,2)}$. $k_{n+1}=t_{n+1}-t_n$ is the length of the time step. (One could argue whether $\psi^{(n,1)}$ and $\psi^{(n,1)}$ live at time step $n$ or $n+1$ and what their upper indices should be. This is a philosophical discussion without practical impact, and one might think of $\psi^{(n,1)}$ as something like $\psi^{(n+\tfrac 13)}$, and $\psi^{(n,2)}$ as $\psi^{(n+\tfrac 23)}$ if that helps clarify things – though, again $n+\frac 13$ is not to be understood as "one third time step after \_form#href_anchor" but more like "we've already done one third of the work necessary -for time step \_form#3170".)

    +for time step \_form#3098".)

    If we multiply the whole equation with $k_{n+1}$ and sort terms with the unknown $\psi^{(n,2)}$ to the left and those with the known $\psi^{(n,1)}$ to the right, then we obtain the following (spatial) partial differential equation that needs to be solved in each time step:

    \begin{align*}
   -i\psi^{(n,2)}
   -
   \frac {k_{n+1}}4 \Delta \psi^{(n,2)}
   +
   \frac {k_{n+1}}2 V \psi^{(n,2)}
   =
   -i\psi^{(n,1)}
   +
   \frac {k_{n+1}}4 \Delta \psi^{(n,1)}
   -
   \frac {k_{n+1}}2 V \psi^{(n,1)}.
 \end{align*}

    Spatial discretization and dealing with complex variables

    As mentioned above, the previous tutorial program dealing with complex-valued solutions (namely, step-29) separated real and imaginary parts of the solution. It thus reduced everything to real arithmetic. In contrast, we here want to keep things complex-valued.

    The first part of this is that we need to define the discretized solution as $\psi_h^n(\mathbf x)=\sum_j \Psi^n_j \varphi_j(\mathbf x) \approx \psi(\mathbf x,t_n)$ where the $\varphi_j$ are the usual shape functions (which are real valued) but the expansion coefficients $\Psi^n_j$ at time step $n$ are now complex-valued. This is easily done in deal.II: We just have to use Vector<std::complex<double>> instead of Vector<double> to store these coefficients.
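    As a small illustration (the dof_handler object and the concrete phase factor below are made up for this sketch and are not part of the program), such a vector is declared and manipulated just like its real-valued counterpart:

      Vector<std::complex<double>> solution(dof_handler.n_dofs());
     
      // Each entry is a std::complex<double> and supports the usual complex
      // arithmetic, for example multiplication by a phase factor:
      const std::complex<double> i = {0., 1.};
      for (auto &value : solution)
        value *= std::exp(-i * 0.5);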

    Of more interest is how to build and solve the linear system. Obviously, this will only be necessary for the second step of the Strang splitting discussed above, with the time discretization of the previous subsection. We obtain the fully discrete version through straightforward substitution of $\psi^n$ by $\psi^n_h$ and multiplication by a test function:

    \begin{align*}
   -iM\Psi^{(n,2)}
   +
   \frac {k_{n+1}}4 A \Psi^{(n,2)}
   +
   \frac {k_{n+1}}2 W \Psi^{(n,2)}
   =
   -iM\Psi^{(n,1)}
   -
   \frac {k_{n+1}}4 A \Psi^{(n,1)}
   -
   \frac {k_{n+1}}2 W \Psi^{(n,1)},
 \end{align*}

    where $M$ is the mass matrix, $A$ the Laplace (stiffness) matrix, and $W$ the matrix resulting from the potential term; the operator combinations on the left and right are the matrices $C$ and $R$ used below.
    \[
   \sum_{k=1}^4 \int_\Omega \alpha_k e^{-\frac{r_k^2}{R^2}}
 \]

    is a positive integer. In other words, we need to choose $\alpha$ as an integer multiple of

    \[
   \left(\int_\Omega e^{-\frac{r_k^2}{R^2}}\right)^{-1}
   =
   \left(R^d\sqrt{\pi^d}\right)^{-1},
 \]

    assuming for the moment that $\Omega={\mathbb R}^d$ – which is of course not the case, but we'll ignore the small difference in integral.
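    For reference, this value follows from the classical one-dimensional Gaussian integral $\int_{-\infty}^{\infty} e^{-x^2/R^2} \, dx = R\sqrt{\pi}$, applied once per coordinate direction:

    \[
   \int_{{\mathbb R}^d} e^{-\frac{|\mathbf x|^2}{R^2}} \, d\mathbf x
   =
   \left(\int_{-\infty}^{\infty} e^{-x^2/R^2} \, dx\right)^d
   =
   \left(R\sqrt{\pi}\right)^d
   =
   R^d\sqrt{\pi^d}.
 \]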

    Thus, we choose $\alpha_k=\left(R^d\sqrt{\pi^d}\right)^{-1}$ for all $k$, and $R=0.1$. This $R$ is small enough that the difference between the exact (infinite) integral and the integral over $\Omega$ should not be too concerning. We choose the four points $\mathbf x_k$ as $(\pm 0.3, 0), (0, \pm 0.3)$ – also far enough away from the boundary of $\Omega$ to keep ourselves on the safe side.

    For simplicity, we pose the problem on the square $[-1,1]^2$. For boundary conditions, we will use time-independent Neumann conditions of the form

    \[
   \nabla\psi(\mathbf x,t)\cdot \mathbf n=0 \qquad\qquad \forall \mathbf x\in\partial\Omega.
 \]

    The potential $V$ is chosen to be zero inside a circle of radius 0.7 around the origin and large outside:

    \[
   V(\mathbf x) =
   \begin{cases}
     0 & \text{if}\; |\mathbf x|\le 0.7,
     \\
     1000 & \text{otherwise}.
   \end{cases}
 \]

    Using a large potential makes sure that the wave function $\psi$ remains small outside the circle of radius 0.7. All of the Gaussians that make up the initial conditions are within this circle, and the solution will mostly oscillate within it, with a small amount of energy radiating into the outside. The use of a large potential also makes sure that the nonphysical boundary condition does not have too large an effect.

    The commented program

    Include files

    The program starts with the usual include files, all of which you should have seen before by now:


    Implementation of the NonlinearSchroedingerEquation class

    We start by specifying the implementation of the constructor of the class. There is nothing surprising to see here except perhaps that we choose quadratic ( $Q_2$) Lagrange elements – the solution is expected to be smooth, so we choose a higher polynomial degree than the bare minimum.

      template <int dim>
      NonlinearSchroedingerEquation<dim>::NonlinearSchroedingerEquation()
      : fe(2)

    An important realization is that while $\psi^{(n,0)}(\mathbf x)$ may be a finite element function (i.e., is piecewise polynomial), this may not necessarily be the case for the "rotated" function in which we have updated the phase using the exponential factor (recall that the amplitude of that function remains constant as part of that step). In other words, we could compute $\psi^{(n,1)}(\mathbf x)$ at every point $\mathbf x\in\Omega$, but we can't represent it on a mesh because it is not a piecewise polynomial function. The best we can do in a discrete setting is to compute a projection or interpolation. In other words, we can compute $\psi_h^{(n,1)}(\mathbf x) = \Pi_h \left(e^{-i\kappa|\psi_h^{(n,0)}(\mathbf x)|^2 \tfrac 12\Delta t} \; \psi_h^{(n,0)}(\mathbf x) \right)$ where $\Pi_h$ is a projection or interpolation operator. The situation is particularly simple if we choose the interpolation: Then, all we need to compute is the value of the right hand side at the node points and use these as nodal values for the vector $\Psi^{(n,1)}$ of degrees of freedom. This is easily done because evaluating the right hand side at node points for a Lagrange finite element as used here requires us to only look at a single (complex-valued) entry of the node vector. In other words, what we need to do is to compute $\Psi^{(n,1)}_j = e^{-i\kappa|\Psi^{(n,0)}_j|^2 \tfrac 12\Delta t} \; \Psi^{(n,0)}_j$ where $j$ loops over all of the entries of our solution vector. This is what the function below does – in fact, it doesn't even use separate vectors for $\Psi^{(n,0)}$ and $\Psi^{(n,1)}$, but just updates the same vector as appropriate.

      template <int dim>
      void NonlinearSchroedingerEquation<dim>::do_half_phase_step()
      {
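        // A sketch of the loop body (the member variables `solution`, `kappa`,
        // and `time_step` are assumptions consistent with the surrounding
        // text): rotate the phase of every complex-valued coefficient by the
        // factor derived above, leaving its magnitude unchanged.
        for (auto &value : solution)
          {
            const std::complex<double> i         = {0., 1.};
            const double               magnitude = std::abs(value);

            value =
              std::exp(-i * kappa * magnitude * magnitude * (time_step / 2.)) *
              value;
          }
      }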
     
     
    The next step is to solve for the linear system in each time step, i.e., the second half step of the Strang splitting we use. Recall that it had the form $C\Psi^{(n,2)} = R\Psi^{(n,1)}$ where $C$ and $R$ are the matrices we assembled earlier.

    The way we solve this here is using a direct solver. We first form the right hand side $r=R\Psi^{(n,1)}$ using the SparseMatrix::vmult() function and put the result into the system_rhs variable. We then call SparseDirectUMFPACK::solve() which takes as argument the matrix $C$ and the right hand side vector and returns the solution in the same vector system_rhs. The final step is then to put the solution so computed back into the solution variable.

      template <int dim>
      void NonlinearSchroedingerEquation<dim>::do_full_spatial_step()
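      {
        // A sketch of the body based on the description above (the member
        // variables `rhs_matrix` for R, `system_matrix` for C, `system_rhs`,
        // and `solution` are assumptions): form r = R Psi^(n,1), solve
        // C Psi^(n,2) = r with a sparse direct solver, and copy the result
        // back into the solution vector.
        rhs_matrix.vmult(system_rhs, solution);

        SparseDirectUMFPACK direct_solver;
        direct_solver.solve(system_matrix, system_rhs);

        solution = system_rhs;
      }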
    • First, we should make use of the fact that the matrix doesn't actually change from time step to time step. This is an artifact of the fact that we here have constant boundary values and that we don't change the time step size – two assumptions that might not be true in actual applications. But at least in cases where this does happen to be the case, it would make sense to only factorize the matrix once (i.e., compute $L$ and $U$ factors once) and then use these factors for all following time steps until the matrix $C$ changes and requires a new factorization. The interface of the SparseDirectUMFPACK class allows for this.
    • Ultimately, however, sparse direct solvers are only efficient for relatively small problems, say up to a few 100,000 unknowns. Beyond this, one needs iterative solvers such as the Conjugate Gradient method (for symmetric and positive definite problems) or GMRES. We have used many of these in other tutorial programs. In all cases, they need to be accompanied by good preconditioners. For the current case, one could in principle use GMRES – a method that does not require any specific properties of the matrix – as the outer solver but at least at the time of writing this sentence (in 2022), the SolverGMRES class can only handle real-valued linear systems. This can be overcome by implementing a variation of GMRES that can deal with complex-valued matrices and vectors, see for example [Fraysse2005] . Even better would be to implement an iterative scheme that exploits the one structural feature we know is true for this problem: That the matrix is complex-symmetric (albeit not Hermitian), for which a literature search would probably find schemes as well.
    • A different strategy towards iterative solvers would be to break the linear system into a $2\times 2$ block system of real and imaginary components, like we did in step-29. This would then enable using real-valued iterative solvers on the outer level (e.g., the existing GMRES implementation), but one would have to come up with preconditioners that exploit the block structure. There is, again, literature on the topic, of which we simply point out a non-representative sample: [Axelsson2014], [Day2001], [Liao2016].

    Boundary conditions

    In order to be usable for actual, realistic problems, solvers for the nonlinear Schrödinger equation need to utilize boundary conditions that make sense for the problem at hand. We have here restricted ourselves to simple Neumann boundary conditions – but these do not actually make sense for the problem. Indeed, the equations are generally posed on an infinite domain. But, since we can't compute on infinite domains, we need to truncate it somewhere and instead pose boundary conditions that make sense for this artificially small domain. The approach widely used is to use the Perfectly Matched Layer method that corresponds to a particular kind of attenuation. It is, in a different context, also used in step-62.

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_59.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_59.html 2024-11-15 06:44:31.475689851 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_59.html 2024-11-15 06:44:31.479689886 +0000 @@ -137,33 +137,33 @@

    This work was partly supported by the German Research Foundation (DFG) through the project "High-order discontinuous Galerkin for the exa-scale" (ExaDG) within the priority program "Software for Exascale Computing" (SPPEXA).

    Introduction

    Matrix-free operator evaluation enables very efficient implementations of discretization with high-order polynomial bases due to a method called sum factorization. This concept has been introduced in the step-37 and step-48 tutorial programs. In this tutorial program, we extend those concepts to discontinuous Galerkin (DG) schemes that include face integrals, a class of methods where high orders are particularly widespread.

    The underlying idea of the matrix-free evaluation is the same as for continuous elements: The matrix-vector product that appears in an iterative solver or multigrid smoother is not implemented by a classical sparse matrix kernel, but instead applied implicitly by the evaluation of the underlying integrals on the fly. For tensor product shape functions that are integrated with a tensor product quadrature rule, this evaluation is particularly efficient by using the sum-factorization technique, which decomposes the initially $(k+1)^{2d}$ operations for interpolation involving $(k+1)^d$ vector entries with associated shape functions at degree $k$ in $d$ dimensions to $(k+1)^d$ quadrature points into $d$ one-dimensional operations of cost $(k+1)^{d+1}$ each. In 3D, this reduces the order of complexity by two powers in $k$. When measured as the complexity per degree of freedom, the complexity is $\mathcal O(k)$ in the polynomial degree. Due to the presence of face integrals in DG, and due to the fact that operations on quadrature points involve more memory transfer, which both scale as $\mathcal O(1)$, the observed complexity is often constant for moderate $k\leq 10$. This means that a high order method can be evaluated with the same throughput in terms of degrees of freedom per second as a low-order method.
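    To make the operation count concrete (a toy sketch, not deal.II code; the function name and the dense 1D interpolation matrix S are invented for this illustration), here is a 2D sum-factorized interpolation from $n^2=(k+1)^2$ coefficients to $q^2$ quadrature-point values; the two 1D passes cost $qn^2 + q^2n$ operations instead of $q^2n^2$ for the naive dense product:

      #include <vector>

      // Interpolate tensor-product coefficients u (size n*n, index u[i1*n+i0])
      // to quadrature points using the 1D matrix S (size q*n, index S[qx*n+ix]).
      std::vector<double> sum_factorized_interpolation_2d(
        const std::vector<double> &u,
        const std::vector<double> &S,
        const unsigned int         n,
        const unsigned int         q)
      {
        // Pass 1: contract over i0, giving an (n x q) intermediate array.
        std::vector<double> tmp(n * q, 0.);
        for (unsigned int i1 = 0; i1 < n; ++i1)
          for (unsigned int q0 = 0; q0 < q; ++q0)
            for (unsigned int i0 = 0; i0 < n; ++i0)
              tmp[i1 * q + q0] += S[q0 * n + i0] * u[i1 * n + i0];

        // Pass 2: contract over i1, giving the (q x q) quadrature values.
        std::vector<double> values(q * q, 0.);
        for (unsigned int q1 = 0; q1 < q; ++q1)
          for (unsigned int q0 = 0; q0 < q; ++q0)
            for (unsigned int i1 = 0; i1 < n; ++i1)
              values[q1 * q + q0] += S[q1 * n + i1] * tmp[i1 * q + q0];

        return values;
      }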

    More information on the algorithms is available in the preprint
    Fast matrix-free evaluation of discontinuous Galerkin finite element operators by Martin Kronbichler and Katharina Kormann, arXiv:1711.03590.

    The symmetric interior penalty formulation for the Laplacian

    For this tutorial program, we exemplify the matrix-free DG framework for the interior penalty discretization of the Laplacian, i.e., the same scheme as the one used for the step-39 tutorial program. The discretization of the Laplacian is given by the following weak form

    \begin{align*}
 &\sum_{K\in\text{cells}} \left(\nabla v_h, \nabla u_h\right)_{K}+\\
 &\sum_{F\in\text{faces}}\Big(-\left<\jump{v_h}, \average{\nabla u_h}\right>_{F} - \left<\average{\nabla v_h}, \jump{u_h}\right>_{F} + \left<\jump{v_h}, \sigma \jump{u_h}\right>_{F}\Big) \\
 &= \sum_{K\in\text{cells}}\left(v_h, f\right)_{K},
 \end{align*}

    where $\jump{v} = v^- \mathbf{n}^- + v^+ \mathbf{n}^+ = \mathbf n^{-}\left(v^- - v^+\right)$ denotes the directed jump of the quantity $v$ from the two associated cells $K^-$ and $K^+$, and $\average{v}=\frac{v^- + v^+}{2}$ is the average from both sides.

    The terms in the equation represent the cell integral after integration by parts, the primal consistency term that arises at the element interfaces due to integration by parts and insertion of an average flux, the adjoint consistency term that is added for restoring symmetry of the underlying matrix, and a penalty term with factor $\sigma$, whose magnitude is equal to the inverse of the length of the cells in the direction normal to the face, multiplied by $k(k+1)$, see step-39. The penalty term is chosen such that an inverse estimate holds and the final weak form is coercive, i.e., positive definite in the discrete setting. The adjoint consistency term and the penalty term involve the jump $\jump{u_h}$ at the element interfaces, which disappears for the analytic solution $u$. Thus, these terms are consistent with the original PDE, ensuring that the method can retain optimal orders of convergence.

    In the implementation below, we implement the weak form above by moving the normal vector $\mathbf{n}^-$ from the jump terms to the derivatives to form a normal derivative of the form $\mathbf{n}^-\cdot \nabla u_h$. This makes the implementation on quadrature points slightly more efficient because we only need to work with scalar terms rather than tensors, and is mathematically equivalent.

    For boundary conditions, we use the so-called mirror principle that defines artificial exterior values $u^+$ by extrapolation from the interior solution $u^-$ combined with the given boundary data, setting $u^+ = -u^- + 2 g_\text{D}$ and $\mathbf{n}^-\cdot \nabla u^+ = \mathbf{n}^-\cdot \nabla u^-$ on Dirichlet boundaries and $u^+=u^-$ and $\mathbf{n}^-\cdot \nabla u^+ = -\mathbf{n}^-\cdot \nabla u^- + 2 g_\text{N}$ on Neumann boundaries, for given Dirichlet values $g_\text{D}$ and Neumann values $g_\text{N}$. These expressions are then inserted in the above weak form. Contributions involving the known quantities $g_\text{D}$ and $g_\text{N}$ are eventually moved to the right hand side, whereas the unknown value $u^-$ is retained on the left hand side and contributes to the matrix terms similarly as interior faces. Upon these manipulations, the same weak form as in step-39 is obtained.

    Face integration support in MatrixFree and FEFaceEvaluation

    The matrix-free framework of deal.II provides the necessary infrastructure to implement the action of the discretized equation above. As opposed to the MatrixFree::cell_loop() that we used in step-37 and step-48, we now build a code in terms of MatrixFree::loop() that takes three function pointers, one for the cell integrals, one for the inner face integrals, and one for the boundary face integrals (in analogy to the design of MeshWorker used in the step-39 tutorial program). In each of these three functions, we then implement the respective terms on the quadrature points. For interpolation between the vector entries and the values and gradients on quadrature points, we use the class FEEvaluation for cell contributions and FEFaceEvaluation for face contributions. The basic usage of these functions has been discussed extensively in the step-37 tutorial program.

    In MatrixFree::loop(), all interior faces are visited exactly once, so one must make sure to compute the contributions from both the test functions $v_h^-$ and $v_h^+$. Given the fact that the test functions on both sides are indeed independent, the weak form above effectively means that we submit the same contribution to both an FEFaceEvaluation object called phi_inner and phi_outer for testing with the normal derivative of the test function, and values with opposite sign for testing with the values of the test function, because the latter involves opposite signs due to the jump term. For faces between cells of different refinement level, the integration is done from the refined side, and FEFaceEvaluation automatically performs interpolation to a subface on the coarse side. Thus, a hanging node never appears explicitly in a user implementation of a weak form.

    The fact that each face is visited exactly once also applies to those faces at subdomain boundaries between different processors when parallelized with MPI, where one cell belongs to one processor and one to the other. The setup in MatrixFree::reinit() splits the faces between the two sides, and eventually only reports the faces actually handled locally in MatrixFree::n_inner_face_batches() and MatrixFree::n_boundary_face_batches(), respectively. Note that, in analogy to the cell integrals discussed in step-37, deal.II applies vectorization over several faces to use SIMD, working on something we call a batch of faces with a single instruction. The face batches are independent from the cell batches, even though the time at which face integrals are processed is kept close to the time when the cell integrals of the respective cells are processed, in order to increase the data locality.

    Another thing that is new in this program is the fact that we no longer split the vector access like FEEvaluation::read_dof_values() or FEEvaluation::distribute_local_to_global() from the evaluation and integration steps, but call combined functions FEEvaluation::gather_evaluate() and FEEvaluation::integrate_scatter(), respectively. This is useful for face integrals because, depending on what gets evaluated on the faces, not all vector entries of a cell must be touched in the first place. Think for example of the case of the nodal element FE_DGQ with node points on the element surface: If we are interested in the shape function values on a face, only $(k+1)^{d-1}$ degrees of freedom contribute to them in a non-trivial way (in a more technical way of speaking, only $(k+1)^{d-1}$ shape functions have a nonzero support on the face and return true for FiniteElement::has_support_on_face()). When compared to the $(k+1)^d$ degrees of freedom of a cell, this is one power less.

    Now of course we are not interested in only the function values, but also the derivatives on the cell. Fortunately, there is an element in deal.II that extends this property of reduced access also for derivatives on faces, the FE_DGQHermite element.

    The FE_DGQHermite element

    The element FE_DGQHermite belongs to the family of FE_DGQ elements, i.e., its shape functions are a tensor product of 1D polynomials and the element is fully discontinuous. As opposed to the nodal character in the usual FE_DGQ element, the FE_DGQHermite element is a mixture of nodal contributions and derivative contributions based on a Hermite-like concept. The underlying polynomial class is Polynomials::HermiteLikeInterpolation and can be summarized as follows: For cubic polynomials, we use two polynomials to represent the function value and first derivative at the left end of the unit interval, $x=0$, and two polynomials to represent the function value and first derivative at the right end of the unit interval, $x=1$. At the opposite ends, both the value and first derivative of the shape functions are zero, ensuring that only two out of the four basis functions contribute to values and derivative on the respective end. However, we deviate from the classical Hermite interpolation in not strictly assigning one degree of freedom for the value and one for the first derivative, but rather allow the first derivative to be a linear combination of the first and the second shape function. This is done to improve the conditioning of the interpolation. Also, when going to degrees beyond three, we add node points in the element interior in a Lagrange-like fashion, combined with double zeros in the points $x=0$ and $x=1$. The position of these extra nodes is determined by the zeros of some Jacobi polynomials as explained in the description of the class Polynomials::HermiteLikeInterpolation.

    Using this element, we only need to access $2(k+1)^{d-1}$ degrees of freedom for computing both values and derivatives on a face. The check whether the Hermite property is fulfilled is done transparently inside FEFaceEvaluation::gather_evaluate() and FEFaceEvaluation::integrate_scatter() that check the type of the basis and reduce the access to data if possible. Obviously, this would not be possible if we had separated FEFaceEvaluation::read_dof_values() from FEFaceEvaluation::evaluate(), because the amount of entries we need to read depends on the type of the derivative (only values, first derivative, etc.) and thus must be given to read_dof_values().

    This optimization is not only useful for computing the face integrals, but also for the MPI ghost layer exchange: In a naive exchange, we would need to send all degrees of freedom of a cell to another processor if the other processor is responsible for computing the face's contribution. Since we know that only some of the degrees of freedom in the evaluation with FEFaceEvaluation are touched, it is natural to only exchange the relevant ones. The MatrixFree::loop() function has support for a selected data exchange when combined with LinearAlgebra::distributed::Vector. To make this happen, we need to tell the loop what kind of evaluation on faces we are going to do, using an argument of type MatrixFree::DataAccessOnFaces, as can be seen in the implementation of LaplaceOperator::vmult() below. The way data is exchanged in that case is as follows: The ghost layer data in the vector still pretends to represent all degrees of freedom, such that FEFaceEvaluation can continue to read the values as if the cell were a locally owned one. The data exchange routines take care of the task for packing and unpacking the data into this format. While this sounds pretty complicated, we will show in the results section below that this really pays off by comparing the performance to a baseline code that does not specify the data access on faces.

    An approximate block-Jacobi smoother using the fast diagonalization method

    In the tradition of the step-37 program, we again solve a Poisson problem with a geometric multigrid preconditioner inside a conjugate gradient solver. Instead of computing the diagonal and use the basic PreconditionChebyshev as a smoother, we choose a different strategy in this tutorial program. We implement a block-Jacobi preconditioner, where a block refers to all degrees of freedom on a cell. Rather than building the full cell matrix and applying its LU factorization (or inverse) in the preconditioner — an operation that would be heavily memory bandwidth bound and thus pretty slow — we approximate the inverse of the block by a special technique called fast diagonalization method.

    \begin{align*}
 L &= A_1 \otimes M_0 + M_1 \otimes A_0
 \end{align*}

    in 2D and

    \begin{align*}
 L &= A_2 \otimes M_1 \otimes M_0 + M_2 \otimes A_1 \otimes M_0 + M_2 \otimes M_1 \otimes A_0
 \end{align*}

    in 3D. The matrices $A_0$ and $A_1$ denote the 1D Laplace matrix (including the cell and face term associated to the current cell values $u^-_h$ and $v^-_h$) and $M_0$ and $M_1$ are the mass matrices. Note that this simple tensor product structure is lost once there are non-constant coefficients on the cell or the geometry is not constant any more. We mention that a similar setup could also be used to replace the computed integrals with this final tensor product form of the matrices, which would cut the operations for the operator evaluation to less than half. However, given the fact that this only holds for Cartesian cells and constant coefficients, which is a pretty narrow case, we refrain from pursuing this idea.

    Interestingly, the exact inverse of the matrix $L$ can be found through tensor products due to a method introduced by Lynch et al. [Lynch1964] in 1964,

    \begin{align*}
 L^{-1} &= S_1 \otimes S_0 (\Lambda_1 \otimes I + I \otimes \Lambda_0)^{-1} S_1^{\mathrm T} \otimes S_0^{\mathrm T},
 \end{align*}

    where $S_d$ is the matrix of eigenvectors to the generalized eigenvalue problem in the given tensor direction $d$:

    \begin{align*}
 A_d s &= \lambda M_d s, \quad d = 0, \ldots,\mathrm{dim-1},
 \end{align*}

    and $\Lambda_d$ is the diagonal matrix representing the generalized eigenvalues $\lambda$. Note that the vectors $s$ are such that they simultaneously diagonalize $A_d$ and $M_d$, i.e. $S_d^{\mathrm T} A_d S_d = \Lambda_d$ and $S_d^{\mathrm T} M_d S_d = I$.
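    To see why this formula indeed yields the inverse, one can verify it directly using the two identities just stated: from $S_d^{\mathrm T} M_d S_d = I$ it follows that $M_d S_d = S_d^{-\mathrm T}$, and from the eigenvalue problem that $A_d S_d = S_d^{-\mathrm T} \Lambda_d$, so that in 2D

    \begin{align*}
 L \left[ S_1 \otimes S_0 (\Lambda_1 \otimes I + I \otimes \Lambda_0)^{-1} S_1^{\mathrm T} \otimes S_0^{\mathrm T} \right]
 &= \left( A_1 S_1 \otimes M_0 S_0 + M_1 S_1 \otimes A_0 S_0 \right) (\Lambda_1 \otimes I + I \otimes \Lambda_0)^{-1} S_1^{\mathrm T} \otimes S_0^{\mathrm T} \\
 &= \left( S_1^{-\mathrm T} \otimes S_0^{-\mathrm T} \right) (\Lambda_1 \otimes I + I \otimes \Lambda_0) (\Lambda_1 \otimes I + I \otimes \Lambda_0)^{-1} S_1^{\mathrm T} \otimes S_0^{\mathrm T} \\
 &= I.
 \end{align*}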

      const unsigned int dimension = 3;
     

    Equation data

    In analogy to step-7, we define an analytic solution that we try to reproduce with our discretization. Since the aim of this tutorial is to show matrix-free methods, we choose one of the simplest possibilities, namely a cosine function whose derivatives are simple enough for us to compute analytically. Further down, the wave number 2.4 we select here will be matched with the domain extent in $x$-direction that is 2.5, such that we obtain a periodic solution at $x = 2.5$ including $6\pi$ or three full wave revolutions in the cosine. The first function defines the solution and its gradient for expressing the analytic solution for the Dirichlet and Neumann boundary conditions, respectively. Furthermore, a class representing the negative Laplacian of the solution is used to represent the right hand side (forcing) function that we use to match the given analytic solution in the discretized version (manufactured solution).

      template <int dim>
      class Solution : public Function<dim>
      {

    The second new feature is the fact that we do not implement a vmult_add() function as we did in step-37 (through the virtual function MatrixFreeOperators::Base::vmult_add()), but directly implement a vmult() functionality. Since both cell and face integrals will sum into the destination vector, we must of course zero the vector somewhere. For DG elements, we are given two options – one is to use FEEvaluation::set_dof_values() instead of FEEvaluation::distribute_local_to_global() in the apply_cell function below. This works because the loop layout in MatrixFree is such that cell integrals always touch a given vector entry before the face integrals. However, this really only works for fully discontinuous bases where every cell has its own degrees of freedom, without any sharing with neighboring results. An alternative setup, the one chosen here, is to let the MatrixFree::loop() take care of zeroing the vector. This can be thought of as simply calling dst = 0; somewhere in the code. The implementation is more involved for supported vectors such as LinearAlgebra::distributed::Vector, because we aim to not zero the whole vector at once. Doing the zero operation on small enough pieces of a few thousand vector entries has the advantage that the vector entries that get zeroed remain in caches before they are accessed again in FEEvaluation::distribute_local_to_global() and FEFaceEvaluation::distribute_local_to_global(). Since matrix-free operator evaluation is really fast, just zeroing a large vector can amount to up to 25% of the operator evaluation time, and we obviously want to avoid this cost. This option of zeroing the vector is also available for MatrixFree::cell_loop and for continuous bases, even though it was not used in the step-37 or step-48 tutorial programs.

    The third new feature is the way we provide the functions to compute on cells, inner faces, and boundary faces: The class MatrixFree has a function called loop that takes three function pointers to the three cases, allowing to separate the implementations of different things. As explained in step-37, these function pointers can be std::function objects or member functions of a class. In this case, we use pointers to member functions.

    The final new feature are the last two arguments of type MatrixFree::DataAccessOnFaces that can be given to MatrixFree::loop(). This class passes the type of data access for face integrals to the MPI data exchange routines LinearAlgebra::distributed::Vector::update_ghost_values() and LinearAlgebra::distributed::Vector::compress() of the parallel vectors. The purpose is to not send all degrees of freedom of a neighboring element, but to reduce the amount of data to what is really needed for the computations at hand. The data exchange is a real bottleneck in particular for high-degree DG methods, therefore a more restrictive way of exchange is often beneficial. The enum field MatrixFree::DataAccessOnFaces can take the value none, which means that no face integrals at all are done, which would be analogous to MatrixFree::cell_loop(), the value values meaning that only shape function values (but no derivatives) are used on faces, and the value gradients when also first derivatives on faces are accessed besides the values. A value unspecified means that all degrees of freedom will be exchanged for the faces that are located at the processor boundaries and designated to be worked on at the local processor.

    To see how the data can be reduced, think of the case of the nodal element FE_DGQ with node points on the element surface, where only $(k+1)^{d-1}$ degrees of freedom contribute to the values on a face for polynomial degree $k$ in $d$ space dimensions, out of the $(k+1)^d$ degrees of freedom of a cell. A similar reduction is also possible for the interior penalty method that evaluates values and first derivatives on the faces. When using a Hermite-like basis in 1d, only up to two basis functions contribute to the value and derivative. The class FE_DGQHermite implements a tensor product of this concept, as discussed in the introduction. Thus, only $2(k+1)^{d-1}$ degrees of freedom must be exchanged for each face, which is a clear win once $k$ gets larger than four or five. Note that this reduced exchange of FE_DGQHermite is valid also on meshes with curved boundaries, as the derivatives are taken on the reference element, whereas the geometry only mixes them on the inside. Thus, this is different from the attempt to obtain $C^1$ continuity with continuous Hermite-type shape functions where the non-Cartesian case changes the picture significantly. Obviously, on non-Cartesian meshes the derivatives also include tangential derivatives of shape functions beyond the normal derivative, but those only need the function values on the element surface, too. Should the element not provide any compression, the loop automatically exchanges all entries for the affected cells.

      template <int dim, int fe_degree, typename number>
      void LaplaceOperator<dim, fe_degree, number>::vmult(
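        LinearAlgebra::distributed::Vector<number>       &dst,
        const LinearAlgebra::distributed::Vector<number> &src) const
      {
        // A sketch of the body based on the description above (the member
        // function pointers and the MatrixFree member `data` are assumptions
        // consistent with the surrounding text): let MatrixFree::loop() zero
        // `dst` in small chunks and restrict the MPI ghost exchange to the
        // values-and-gradients access pattern needed by the interior penalty
        // method.
        data.loop(&LaplaceOperator::apply_cell,
                  &LaplaceOperator::apply_face,
                  &LaplaceOperator::apply_boundary,
                  this,
                  dst,
                  src,
                  /*zero_dst_vector = */ true,
                  MatrixFree<dim, number>::DataAccessOnFaces::gradients,
                  MatrixFree<dim, number>::DataAccessOnFaces::gradients);
      }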
      for (unsigned int face = face_range.first; face < face_range.second; ++face)
      {
    On a given batch of faces, we first update the pointers to the current face and then access the vector. As mentioned above, we combine the vector access with the evaluation. In the case of face integrals, the data access into the vector can be reduced for the special case of an FE_DGQHermite basis as explained for the data exchange above: Since only $2(k+1)^{d-1}$ out of the $(k+1)^d$ cell degrees of freedom get multiplied by a non-zero value or derivative of a shape function, this structure can be utilized for the evaluation, significantly reducing the data access. The reduction of the data access is not only beneficial because it reduces the data in flight and thus helps caching, but also because the data access to faces is often more irregular than for cell integrals when gathering values from cells that are farther apart in the index list of cells.

      phi_inner.reinit(face);
      phi_inner.gather_evaluate(src,
     
    The next two statements compute the penalty parameter for the interior penalty method. As explained in the introduction, we would like to have a scaling like $\frac{1}{h_\text{i}}$ of the length $h_\text{i}$ normal to the face. For a general non-Cartesian mesh, this length must be computed by the product of the inverse Jacobian times the normal vector in real coordinates. From this vector of dim components, we must finally pick the component that is oriented normal to the reference cell. In the geometry data stored in MatrixFree, a permutation of the components in the Jacobian is applied such that this latter direction is always the last component dim-1 (this is beneficial because reference-cell derivative sorting can be made agnostic of the direction of the face). This means that we can simply access the last entry dim-1 and must not look up the local face number in data.get_face_info(face).interior_face_no and data.get_face_info(face).exterior_face_no. Finally, we must also take the absolute value of these factors as the normal could point into either positive or negative direction.

      const VectorizedArray<number> inverse_length_normal_to_face =
        0.5 * (std::abs((phi_inner.normal_vector(0) *
                         phi_inner.inverse_jacobian(0))[dim - 1]) +
               std::abs((phi_outer.normal_vector(0) *
                         phi_outer.inverse_jacobian(0))[dim - 1]));

      const VectorizedArray<number> sigma =
        inverse_length_normal_to_face * get_penalty_factor();
     
    In the loop over the quadrature points, we eventually compute all contributions to the interior penalty scheme. According to the formulas in the introduction, the value of the test function gets multiplied by the difference of the jump in the solution times the penalty parameter and the average of the normal derivative in real space. Since the two evaluators for interior and exterior sides get different signs due to the jump, we pass the result with a different sign here. The normal derivative of the test function gets multiplied by the negative jump in the solution between the interior and exterior side. This term, coined adjoint consistency term, must also include the factor of $\frac{1}{2}$ in the code in accordance with its relation to the primal consistency term that gets the factor of one half due to the average in the test function slot.

      for (const unsigned int q : phi_inner.quadrature_point_indices())
      {
      const VectorizedArray<number> solution_jump =
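        // Continuation sketched from the description above; `sigma` is the
        // penalty parameter computed right before this loop:
        phi_inner.get_value(q) - phi_outer.get_value(q);
      const VectorizedArray<number> average_normal_derivative =
        (phi_inner.get_normal_derivative(q) +
         phi_outer.get_normal_derivative(q)) *
        number(0.5);
      const VectorizedArray<number> test_by_value =
        solution_jump * sigma - average_normal_derivative;

      // Testing with the values picks up opposite signs on the two sides
      // (jump term), while the normal derivative of the test function is
      // tested with minus one half of the solution jump on both sides
      // (adjoint consistency term).
      phi_inner.submit_value(test_by_value, q);
      phi_outer.submit_value(-test_by_value, q);
      phi_inner.submit_normal_derivative(-solution_jump * number(0.5), q);
      phi_outer.submit_normal_derivative(-solution_jump * number(0.5), q);
      }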
     
     
     
    The boundary face function follows by and large the interior face function. The only difference is the fact that we do not have a separate FEFaceEvaluation object that provides us with exterior values $u^+$, but we must define them from the boundary conditions and interior values $u^-$. As explained in the introduction, we use $u^+ = -u^- + 2 g_\text{D}$ and $\mathbf{n}^-\cdot \nabla u^+ = \mathbf{n}^-\cdot \nabla u^-$ on Dirichlet boundaries and $u^+=u^-$ and $\mathbf{n}^-\cdot \nabla u^+ = -\mathbf{n}^-\cdot \nabla u^- + 2 g_\text{N}$ on Neumann boundaries. Since this operation implements the homogeneous part, i.e., the matrix-vector product, we must neglect the boundary functions $g_\text{D}$ and $g_\text{N}$ here; they are added to the right hand side in LaplaceProblem::compute_rhs(). Note that due to extension of the solution $u^-$ to the exterior via $u^+$, we can keep all factors $0.5$ the same as in the inner face function, see also the discussion in step-39.

    There is one catch at this point: The implementation below uses a boolean variable is_dirichlet to switch between the Dirichlet and the Neumann cases. However, we solve a problem where we also want to impose periodic boundary conditions on some boundaries, namely along those in the $x$ direction. One might wonder how those conditions should be handled here. The answer is that MatrixFree automatically treats periodic boundaries as what they are technically, namely an inner face where the solution values of two adjacent cells meet and must be treated by proper numerical fluxes. Thus, all the faces on the periodic boundaries will appear in the apply_face() function and not in this one.

      template <int dim, int fe_degree, typename number>
      void LaplaceOperator<dim, fe_degree, number>::apply_boundary(
      const MatrixFree<dim, number> &data,
      laplace_unscaled(i, j) = sum_laplace;
      }
     
    Next, we go through the cells and pass the scaled matrices to TensorProductMatrixSymmetricSum to actually compute the generalized eigenvalue problem for representing the inverse: Since the matrix approximation is constructed as $A\otimes M + M\otimes A$ and the weights are constant for each element, we can apply all weights on the Laplace matrix and simply keep the mass matrices unscaled. In the loop over cells, we want to make use of the geometry compression provided by the MatrixFree class and check if the current geometry is the same as on the last cell batch, in which case there is nothing to do. This compression can be accessed by FEEvaluation::get_mapping_data_index_offset() once reinit() has been called.

    Once we have accessed the inverse Jacobian through the FEEvaluation access function (we take the one for the zeroth quadrature point as they should be the same on all quadrature points for a Cartesian cell), we check that it is diagonal and then extract the determinant of the original Jacobian, i.e., the inverse of the determinant of the inverse Jacobian, and set the weight as $\text{det}(J) / h_d^2$ according to the 1d Laplacian times $d-1$ copies of the mass matrix.
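    A condensed sketch of what this amounts to per cell batch (the variable names here are invented, and we assume the reinit()/apply_inverse() interface of TensorProductMatrixSymmetricSum; the real loop below additionally exploits the geometry compression):

      // Assumed: unscaled 1d mass matrix and det(J)/h_d^2 times the 1d
      // Laplacian, the same in each direction for a Cartesian cell.
      std::array<Table<2, VectorizedArray<number>>, dim> mass_matrices;
      std::array<Table<2, VectorizedArray<number>>, dim> laplace_matrices;
      for (unsigned int d = 0; d < dim; ++d)
        {
          mass_matrices[d]    = mass_matrix_1d;
          laplace_matrices[d] = laplace_matrix_scaled;
        }

      TensorProductMatrixSymmetricSum<dim, VectorizedArray<number>, fe_degree + 1>
        cell_inverse;
      cell_inverse.reinit(mass_matrices, laplace_matrices);

      // In the smoother, the approximate block inverse is then applied to the
      // cell-local degrees of freedom via
      // cell_inverse.apply_inverse(dst_view, src_view);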

      cell_matrices.clear();
      unsigned int old_mapping_data_index = numbers::invalid_unsigned_int;
    @@ -979,9 +979,9 @@
      phi.integrate_scatter(EvaluationFlags::values, system_rhs);
      }
     
Secondly, we also need to apply the Dirichlet and Neumann boundary conditions. This function is the missing part of the LaplaceOperator::apply_boundary() function once the exterior solution values $u^+ = -u^- + 2 g_\text{D}$ and $\mathbf{n}^-\cdot \nabla u^+ = \mathbf{n}^-\cdot \nabla u^-$ on Dirichlet boundaries and $u^+=u^-$ and $\mathbf{n}^-\cdot \nabla u^+ = -\mathbf{n}^-\cdot \nabla u^- + 2 g_\text{N}$ on Neumann boundaries are inserted and expanded in terms of the boundary functions $g_\text{D}$ and $g_\text{N}$. One thing to remember is that we move the boundary conditions to the right hand side, so the sign is the opposite from what we imposed on the solution part.

We could have issued both the cell and the boundary parts through a single MatrixFree::loop call, but we choose to manually write the full loop over all faces to learn how the index layout of face indices is set up in MatrixFree: Both the inner faces and the boundary faces share the index range, and all batches of inner faces have lower numbers than the batches of boundary faces. A single index for both variants allows us to easily use the same data structure FEFaceEvaluation for both cases that attaches to the same data field, just at different positions. The number of inner face batches (where a batch is due to the combination of several faces into one for vectorization) is given by MatrixFree::n_inner_face_batches(), whereas the number of boundary face batches is given by MatrixFree::n_boundary_face_batches().

      FEFaceEvaluation<dim, fe_degree> phi_face(data, true);
      for (unsigned int face = data.n_inner_face_batches();
    @@ -1153,7 +1153,7 @@

The run() function sets up the initial grid and then runs the multigrid program in the usual way. As a domain, we choose a rectangle with periodic boundary conditions in the $x$-direction, a Dirichlet condition on the front face in $y$ direction (i.e., the face with index number 2, with boundary id equal to 0), and Neumann conditions on the back face as well as the two faces in $z$ direction for the 3d case (with boundary id equal to 1). The extent of the domain is a bit different in the $x$ direction (where we want to achieve a periodic solution given the definition of Solution) as compared to the $y$ and $z$ directions.
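To make the periodicity setup concrete, here is a minimal sketch of the usual deal.II idiom for it. The boundary ids 10 and 11 for the two matching $x$-faces are assumptions chosen for illustration; the actual program may assign different ids:

  // Sketch: mark the two x-faces with distinct (here: hypothetical) boundary
  // ids, match them up, and hand the resulting pairs to the triangulation.
  std::vector<GridTools::PeriodicFacePair<
    typename Triangulation<dim>::cell_iterator>>
    periodic_faces;
  GridTools::collect_periodic_faces(triangulation,
                                    /*b_id1=*/10,
                                    /*b_id2=*/11,
                                    /*direction=*/0, // periodic in x
                                    periodic_faces);
  triangulation.add_periodicity(periodic_faces);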

      template <int dim, int fe_degree>
      void LaplaceProblem<dim, fe_degree>::run()
      {
    @@ -1352,7 +1352,7 @@
Degree       1    2    3    4    5    6    7    8    9   10   11   12
MDoFs/s   2.94 3.29 3.62 3.72 3.47 3.41 2.93 2.88 2.57 2.27 2.01 1.87
We clearly see how the efficiency per DoF initially improves until it reaches a maximum for the polynomial degree $k=4$. This effect is surprising, not only because higher polynomial degrees often yield a vastly better solution, but especially also when having matrix-based schemes in mind where the denser coupling at higher degree leads to a monotonically decreasing throughput (and a drastic one in 3D, with $k=4$ being more than ten times slower than $k=1$!). For higher degrees, the throughput decreases a bit, which is both due to an increase in the number of iterations (going from 12 at $k=2,3,4$ to 19 at $k=10$) and due to the $\mathcal O(k)$ complexity of operator evaluation. Nonetheless, efficiency as the time to solution would still be better for higher polynomial degrees because they have better convergence rates (at least for problems as simple as this one): For $k=12$, we reach roundoff accuracy already with 1 million DoFs (solver time less than a second), whereas for $k=8$ we need 24 million DoFs and 8 seconds. For $k=5$, the error is around $10^{-9}$ with 57 million DoFs and thus still far away from roundoff, despite taking 16 seconds.

    Note that the above numbers are a bit pessimistic because they include the time it takes the Chebyshev smoother to compute an eigenvalue estimate, which is around 10 percent of the solver time. If the system is solved several times (as e.g. common in fluid dynamics), this eigenvalue cost is only paid once and faster times become available.

    Evaluation of efficiency of ingredients

Finally, we take a look at some of the special ingredients presented in this tutorial program, namely the FE_DGQHermite basis in particular and the specification of MatrixFree::DataAccessOnFaces. In the following table, the third row shows the optimized solver above, the fourth row shows the timings with only the MatrixFree::DataAccessOnFaces set to unspecified rather than the optimal gradients, and the last one with FE_DGQHermite replaced by the basic FE_DGQ elements, where both the MPI exchange and the operations done by FEFaceEvaluation::gather_evaluate() and FEFaceEvaluation::integrate_scatter() are more expensive.
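For reference, the data-access hints enter as trailing arguments of the face loop. The following is a sketch of such a call; the kernel names apply_cell, apply_face, apply_boundary and the vectors dst, src mirror the ones used in this program, and the remaining defaulted parameters of MatrixFree::loop are omitted:

  data.loop(&LaplaceOperator::apply_cell,
            &LaplaceOperator::apply_face,
            &LaplaceOperator::apply_boundary,
            this,
            dst,
            src,
            /*zero_dst_vector=*/true,
            MatrixFree<dim, number>::DataAccessOnFaces::gradients,
            MatrixFree<dim, number>::DataAccessOnFaces::gradients);

Passing DataAccessOnFaces::unspecified instead of gradients falls back to exchanging all ghost data, which is exactly the difference measured in the fourth row of the table.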

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_6.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_6.html 2024-11-15 06:44:31.523690280 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_6.html 2024-11-15 06:44:31.523690280 +0000 @@ -152,20 +152,20 @@
    Note
    The material presented here is also discussed in video lecture 15, video lecture 16, video lecture 17, video lecture 17.25, video lecture 17.5, video lecture 17.75. (All video lectures are also available here.)

    This program is finally about one of the main features of deal.II: the use of adaptively (locally) refined meshes. The program is still based on step-4 and step-5, and, as you will see, it does not actually take very much code to enable adaptivity. Indeed, while we do a great deal of explaining, adaptive meshes can be added to an existing program with less than ten lines of additional code. The program shows what these lines are, as well as another important ingredient of adaptive mesh refinement (AMR): a criterion that can be used to determine whether it is necessary to refine a cell because the error is large on it, whether the cell can be coarsened because the error is particularly small on it, or whether we should just leave the cell as it is. We will discuss all of these issues in the following.

    The program solves the same problem as step-5, that is, we solve the equation

\begin{align*}
  -\nabla \cdot a(\mathbf x) \nabla u(\mathbf x) &= 1 \qquad\qquad & \text{in}\ \Omega,
  \\
  u &= 0 \qquad\qquad & \text{on}\ \partial\Omega,
\end{align*}

where $a(\mathbf x)$ is a spatially variable coefficient defined as

\begin{align*}
  a(\mathbf x) =
  \begin{cases}
    20 & \text{if}\ |\mathbf x|<0.5, \\
    1  & \text{otherwise.}
  \end{cases}
\end{align*}

    What adaptively refined meshes look like

There are a number of ways in which one can adaptively refine meshes. The basic structure of the overall algorithm is always the same and consists of a loop over the following steps:

      @@ -195,18 +195,18 @@

      The first and third mesh are of course based on a square and a cube, but as the second mesh shows, this is not necessary. The important point is simply that we can refine a mesh independently of its neighbors (subject to the constraint that a cell can be only refined once more than its neighbors), but that we end up with these “hanging nodes” if we do this.

      Why adaptively refined meshes?

      Now that you have seen what these adaptively refined meshes look like, you should ask why we would want to do this. After all, we know from theory that if we refine the mesh globally, the error will go down to zero as

\begin{align*}
  \|\nabla(u-u_h)\|_{\Omega} \le C h_\text{max}^p \| \nabla^{p+1} u \|_{\Omega},
\end{align*}

where $C$ is some constant independent of $h$ and $u$, $p$ is the polynomial degree of the finite element in use, and $h_\text{max}$ is the diameter of the largest cell. So if the largest cell is important, then why would we want to make the mesh fine in some parts of the domain but not all?

      The answer lies in the observation that the formula above is not optimal. In fact, some more work shows that the following is a better estimate (which you should compare to the square of the estimate above):

\begin{align*}
  \|\nabla(u-u_h)\|_{\Omega}^2 \le C \sum_K h_K^{2p} \| \nabla^{p+1} u \|^2_K.
\end{align*}

(Because $h_K\le h_\text{max}$, this formula immediately implies the previous one if you just pull the mesh size out of the sum.) What this formula suggests is that it is not necessary to make the largest cell small, but that the cells really only need to be small where $\| \nabla^{p+1} u \|_K$ is large! In other words: The mesh really only has to be fine where the solution has large variations, as indicated by the $p+1$st derivative. This makes intuitive sense: if, for example, we use a linear element $p=1$, then places where the solution is nearly linear (as indicated by $\nabla^2 u$ being small) will be well resolved even if the mesh is coarse. Only those places where the second derivative is large will be poorly resolved by large elements, and consequently that's where we should make the mesh small.

Of course, this a priori estimate is not very useful in practice since we don't know the exact solution $u$ of the problem, and consequently, we cannot compute $\nabla^{p+1}u$. But, and that is the approach commonly taken, we can compute numerical approximations of $\nabla^{p+1}u$ based only on the discrete solution $u_h$ that we have computed before. We will discuss this in slightly more detail below. This will then help us determine which cells have a large $p+1$st derivative, and these are then candidates for refining the mesh.

      How to deal with hanging nodes in theory

      The methods using triangular meshes mentioned above go to great lengths to make sure that each vertex is a vertex of all adjacent cells – i.e., that there are no hanging nodes. This then automatically makes sure that we can define shape functions in such a way that they are globally continuous (if we use the common $Q_p$ Lagrange finite element methods we have been using so far in the tutorial programs, as represented by the FE_Q class).

      On the other hand, if we define shape functions on meshes with hanging nodes, we may end up with shape functions that are not continuous. To see this, think about the situation above where the top right cell is not refined, and consider for a moment the use of a bilinear finite element. In that case, the shape functions associated with the hanging nodes are defined in the obvious way on the two small cells adjacent to each of the hanging nodes. But how do we extend them to the big adjacent cells? Clearly, the function's extension to the big cell cannot be bilinear because then it needs to be linear along each edge of the large cell, and that means that it needs to be zero on the entire edge because it needs to be zero on the two vertices of the large cell on that edge. But it is not zero at the hanging node itself when seen from the small cells' side – so it is not continuous. The following three figures show three of the shape functions along the edges in question that turn out to not be continuous when defined in the usual way simply based on the cells they are adjacent to:

      @@ -222,7 +222,7 @@
      A discontinuous shape function adjacent to a hanging node
But we do want the finite element solution to be continuous so that we have a “conforming finite element method” where the discrete finite element space is a proper subset of the $H^1$ function space in which we seek the solution of the Laplace equation. To guarantee that the global solution is continuous at these nodes as well, we have to state some additional constraints on the values of the solution at these nodes. The trick is to realize that while the shape functions shown above are discontinuous (and consequently an arbitrary linear combination of them is also discontinuous), linear combinations in which the shape functions are added up as $u_h(\mathbf x)=\sum_j U_j \varphi_j(\mathbf x)$ can be continuous if the coefficients $U_j$ satisfy certain relationships. In other words, the coefficients $U_j$ cannot be chosen arbitrarily but have to satisfy certain constraints so that the function $u_h$ is in fact continuous. What these constraints have to look like is relatively easy to understand conceptually, but the implementation in software is complicated and takes several thousand lines of code. On the other hand, in user code, it is only about half a dozen lines you have to add when dealing with hanging nodes.

    In the program below, we will show how we can get these constraints from deal.II, and how to use them in the solution of the linear system of equations. Before going over the details of the program below, you may want to take a look at the Constraints on degrees of freedom documentation topic that explains how these constraints can be computed and what classes in deal.II work on them.

    How to deal with hanging nodes in practice

    The practice of hanging node constraints is rather simpler than the theory we have outlined above. In reality, you will really only have to add about half a dozen lines of additional code to a program like step-4 to make it work with adaptive meshes that have hanging nodes. The interesting part about this is that it is entirely independent of the equation you are solving: The algebraic nature of these constraints has nothing to do with the equation and only depends on the choice of finite element. As a consequence, the code to deal with these constraints is entirely contained in the deal.II library itself, and you do not need to worry about the details.

    @@ -235,11 +235,11 @@

    These four steps are really all that is necessary – it's that simple from a user perspective. The fact that, in the function calls mentioned above, you will run through several thousand lines of not-so-trivial code is entirely immaterial to this: In user code, there are really only four additional steps.
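In code, the handful of lines in question typically look as follows. This is a sketch using the usual deal.II names; the surrounding setup, assembly, and solve are omitted:

  AffineConstraints<double> constraints;
  DoFTools::make_hanging_node_constraints(dof_handler, constraints);
  constraints.close();
  // During assembly, the constraints are applied on the fly via
  // constraints.distribute_local_to_global(cell_matrix, cell_rhs,
  //                                        local_dof_indices,
  //                                        system_matrix, system_rhs);
  // and after solving, the solution is made conforming:
  constraints.distribute(solution);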

    How we obtain locally refined meshes

The next question, now that we know how to deal with meshes that have these hanging nodes, is how we obtain them.

A simple way has already been shown in step-1: If you know where it is necessary to refine the mesh, then you can create one by hand. But in reality, we don't know this: We don't know the solution of the PDE up front (because, if we did, we wouldn't have to use the finite element method), and consequently we do not know where it is necessary to add local mesh refinement to better resolve areas where the solution has strong variations. But the discussion above shows that maybe we can get away with using the discrete solution $u_h$ on one mesh to estimate the derivatives $\nabla^{p+1} u$, and then use this to determine which cells are too large and which are already small enough. We can then generate a new mesh from the current one using local mesh refinement. If necessary, this step is then repeated until we are happy with our numerical solution – or, more commonly, until we run out of computational resources or patience.

    So that's exactly what we will do. The locally refined grids are produced using an error estimator which estimates the energy error for numerical solutions of the Laplace operator. Since it was developed by Kelly and co-workers, we often refer to it as the “Kelly refinement indicator” in the library, documentation, and mailing list. The class that implements it is called KellyErrorEstimator, and there is a great deal of information to be found in the documentation of that class that need not be repeated here. The summary, however, is that the class computes a vector with as many entries as there are active cells, and where each entry contains an estimate of the error on that cell. This estimate is then used to refine the cells of the mesh: those cells that have a large error will be marked for refinement, those that have a particularly small estimate will be marked for coarsening. We don't have to do this by hand: The functions in namespace GridRefinement will do all of this for us once we have obtained the vector of error estimates.

It is worth noting that while the Kelly error estimator was developed for Laplace's equation, it has proven to be a suitable tool to generate locally refined meshes for a wide range of equations, not restricted to elliptic problems only. Although it will create non-optimal meshes for other equations, it is often a good way to quickly produce meshes that are well adapted to the features of solutions, such as regions of great variation or discontinuities.

    Boundary conditions

It turns out that one can see Dirichlet boundary conditions as just another constraint on the degrees of freedom. It's a particularly simple one, indeed: If $j$ is a degree of freedom on the boundary, with position $\mathbf x_j$, then imposing the boundary condition $u=g$ on $\partial\Omega$ simply yields the constraint $U_j=g({\mathbf x}_j)$.

    The AffineConstraints class can handle such constraints as well, which makes it convenient to let the same object we use for hanging node constraints also deal with these Dirichlet boundary conditions. This way, we don't need to apply the boundary conditions after assembly (like we did in the earlier steps). All that is necessary is that we call the variant of VectorTools::interpolate_boundary_values() that returns its information in an AffineConstraints object, rather than the std::map we have used in previous tutorial programs.
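A sketch of how the two kinds of constraints end up in the same object; boundary id 0 and a zero boundary function match this program's setup:

  constraints.clear();
  DoFTools::make_hanging_node_constraints(dof_handler, constraints);
  VectorTools::interpolate_boundary_values(dof_handler,
                                           0, // boundary id
                                           Functions::ZeroFunction<dim>(),
                                           constraints);
  constraints.close();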

    Other things this program shows

    Since the concepts used for locally refined grids are so important, we do not show much other material in this example. The most important exception is that we show how to use biquadratic elements instead of the bilinear ones which we have used in all previous examples. In fact, the use of higher order elements is accomplished by only replacing three lines of the program, namely the initialization of the fe member variable in the constructor of the main class of this program, and the use of an appropriate quadrature formula in two places. The rest of the program is unchanged.

    @@ -488,8 +488,8 @@

We use a sophisticated error estimation scheme to refine the mesh instead of global refinement. We will use the KellyErrorEstimator class which implements an error estimator for the Laplace equation; it can in principle handle variable coefficients, but we will not use these advanced features; rather, we will use its most simple form, since we are not interested in quantitative results but only in a quick way to generate locally refined grids.

Although the error estimator derived by Kelly et al. was originally developed for the Laplace equation, we have found that it is also well suited to quickly generate locally refined grids for a wide class of problems. This error estimator uses the solution gradient's jump at cell faces (which is a measure for the second derivatives) and scales it by the size of the cell. It is therefore a measure for the local smoothness of the solution at the place of each cell and it is thus understandable that it yields reasonable grids also for hyperbolic transport problems or the wave equation, although these grids are certainly suboptimal compared to approaches specially tailored to the problem. This error estimator may therefore be understood as a quick way to test an adaptive program.

The way the estimator works is to take a DoFHandler object describing the degrees of freedom and a vector of values for each degree of freedom as input and compute a single indicator value for each active cell of the triangulation (i.e., one value for each of the active cells). To do so, it needs two additional pieces of information: first, a face quadrature formula, i.e., a quadrature formula on dim-1 dimensional objects. We use a 3-point Gauss rule again, a choice that is consistent with the bi-quadratic finite element shape functions in this program. (What constitutes a suitable quadrature rule here of course depends on knowledge of the way the error estimator evaluates the solution field. As said above, the jump of the gradient is integrated over each face, which would be a quadratic function on each face for the quadratic elements in use in this example. In fact, however, it is the square of the jump of the gradient, as explained in the documentation of that class, and that is a quartic function, for which a 3-point Gauss formula is sufficient since it integrates polynomials up to degree 5 exactly.)

Secondly, the function wants a list of boundary indicators for those boundaries where we have imposed Neumann values of the kind $\partial_n u(\mathbf x) = h(\mathbf x)$, along with a function $h(\mathbf x)$ for each such boundary. This information is represented by a map from boundary indicators to function objects describing the Neumann boundary values. In the present example program, we do not use Neumann boundary values, so this map is empty, and in fact constructed using the default constructor of the map in the place where the function call expects the respective function argument.

    The output is a vector of values for all active cells. While it may make sense to compute the value of a solution degree of freedom very accurately, it is usually not necessary to compute the error indicator corresponding to the solution on a cell particularly accurately. We therefore typically use a vector of floats instead of a vector of doubles to represent error indicators.
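Putting these pieces together, the body of refine_grid() shown next boils down to the following sketch. Member names follow the usual step-6 conventions, and the refine and coarsen fractions 0.3 and 0.03 are example values, not mandated by the library:

  Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
  KellyErrorEstimator<dim>::estimate(dof_handler,
                                     QGauss<dim - 1>(fe.degree + 1),
                                     {}, // empty map: no Neumann boundaries
                                     solution,
                                     estimated_error_per_cell);
  // Mark the 30% of cells with the largest indicators for refinement and
  // the 3% with the smallest indicators for coarsening, then execute:
  GridRefinement::refine_and_coarsen_fixed_number(triangulation,
                                                  estimated_error_per_cell,
                                                  0.3,
                                                  0.03);
  triangulation.execute_coarsening_and_refinement();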

      template <int dim>
      void Step6<dim>::refine_grid()
@@ -697,18 +697,18 @@

As we can see, all preconditioners behave pretty much the same on this simple problem, with the number of iterations growing like ${\cal O}(N^{1/2})$ and because each iteration requires around ${\cal O}(N)$ operations the total CPU time grows like ${\cal O}(N^{3/2})$ (for the few smallest meshes, the CPU time is so small that it doesn't record). Note that even though it is the simplest method, Jacobi is the fastest for this problem.

    The situation changes slightly when the finite element is not a bi-quadratic one (i.e., polynomial degree two) as selected in the constructor of this program, but a bi-linear one (polynomial degree one). If one makes this change, the results are as follows:

    In other words, while the increase in iterations and CPU time is as before, Jacobi is now the method that requires the most iterations; it is still the fastest one, however, owing to the simplicity of the operations it has to perform. This is not to say that Jacobi is actually a good preconditioner – for problems of appreciable size, it is definitely not, and other methods will be substantially better – but really only that it is fast because its implementation is so simple that it can compensate for a larger number of iterations.

The message to take away from this is not that simplicity in preconditioners is always best. While this may be true for the current problem, it definitely is not once we move to more complicated problems (elasticity or Stokes, for example step-8 or step-22). Secondly, all of these preconditioners still lead to an increase in the number of iterations as the number $N$ of degrees of freedom grows, for example ${\cal O}(N^\alpha)$; this, in turn, leads to a total growth in effort as ${\cal O}(N^{1+\alpha})$ since each iteration takes ${\cal O}(N)$ work. This behavior is undesirable: we would really like to solve linear systems with $N$ unknowns in a total of ${\cal O}(N)$ work; there is a class of preconditioners that can achieve this, namely geometric (step-16, step-37, step-39) or algebraic multigrid (step-31, step-40, and several others) preconditioners. They are, however, significantly more complex than the preconditioners outlined above, and so we will leave their use to these later tutorial programs. The point to make, however, is that "real" finite element programs do not use the preconditioners we mention above: These are simply shown for expository purposes.

Finally, the last message to take home is that when the data shown above was generated (in 2018), linear systems with 100,000 unknowns are easily solved on a desktop or laptop machine in about a second, making the solution of relatively simple 2d problems even to very high accuracy not that big a task as it used to be in the past. At the same time, the situation for 3d problems continues to be quite different: A uniform 2d mesh with 100,000 unknowns corresponds to a grid with about $300 \times 300$ nodes; the corresponding 3d mesh has $300 \times 300 \times 300$ nodes and 30 million unknowns. Because finite element matrices in 3d have many more nonzero entries than in 2d, solving these linear systems will not only take 300 times as much CPU time, but substantially longer. In other words, achieving the same resolution in 3d is quite a large problem, and solving it within a reasonable amount of time will require much more work to implement better linear solvers. As mentioned above, multigrid methods and matrix-free methods (see, for example, step-37), along with parallelization (step-40) will be necessary, but are then also able to comfortably solve such linear systems.

    A better mesh

If you look at the meshes above, you will see that even though the domain is the unit disk, and the jump in the coefficient lies along a circle, the cells that make up the mesh do not track this geometry well. The reason, already hinted at in step-1, is that in the absence of other information, the Triangulation class only sees a bunch of coarse grid cells but has, of course, no real idea what kind of geometry they might represent when looked at together. For this reason, we need to tell the Triangulation what to do when a cell is refined: where should the new vertices at the edge midpoints and the cell midpoint be located so that the child cells better represent the desired geometry than the parent cell.

    To visualize what the triangulation actually knows about the geometry, it is not enough to just output the location of vertices and draw a straight line for each edge; instead, we have to output both interior and boundary lines as multiple segments so that they look curved. We can do this by making one change to the gnuplot part of output_results:

    {
    @@ -812,27 +812,27 @@
    Initial grid: the Ladutenko grid with one global refinement.
    First adaptively refined Ladutenko grid.
    Second adaptively refined Ladutenko grid.
    Third adaptively refined Ladutenko grid.
Fourth adaptively refined Ladutenko grid. The cells are clustered along the inner circle.
Fifth adaptively refined Ladutenko grid: the cells are clustered along the inner circle.

    Creating good meshes, and in particular making them fit the geometry you want, is a complex topic in itself. You can find much more on this in step-49, step-53, and step-54, among other tutorial programs that cover the issue. step-65 shows another, less manual way to achieve a mesh well fit to the problem here. Information on curved domains can also be found in the documentation topic on Manifold descriptions.

Why does it make sense to choose a mesh that tracks the internal interface? There are a number of reasons, but the most essential one comes down to what we actually integrate in our bilinear form. Conceptually, we want to integrate the term $A_{ij}^K=\int_K a(\mathbf x) \nabla \varphi_i(\mathbf x) \nabla \varphi_j(\mathbf x) \, dx$ as the contribution of cell $K$ to the matrix entry $A_{ij}$. We cannot compute it exactly and have to resort to quadrature. We know that quadrature is accurate if the integrand is smooth. That is because quadrature in essence computes a polynomial approximation to the integrand that coincides with the integrand in the quadrature points, and then computes the volume under this polynomial as an approximation to the volume under the original integrand. This polynomial interpolant is accurate if the integrand is smooth on a cell, but it is usually rather inaccurate if the integrand is discontinuous on a cell.

    Consequently, it is worthwhile to align cells in such a way that the interfaces across which the coefficient is discontinuous are aligned with cell interfaces. This way, the coefficient is constant on each cell, following which the integrand will be smooth, and its polynomial approximation and the quadrature approximation of the integral will both be accurate. Note that such an alignment is common in many practical cases, so deal.II provides a number of functions (such as material_id) to help manage such a scenario. Refer to step-28 and step-46 for examples of how material ids can be applied.

    Finally, let us consider the case of a coefficient that has a smooth and non-uniform distribution in space. We can repeat once again all of the above discussion on the representation of such a function with the quadrature. So, to simulate it accurately there are a few readily available options: you could reduce the cell size, increase the order of the polynomial used in the quadrature formula, select a more appropriate quadrature formula, or perform a combination of these steps. The key is that providing the best fit of the coefficient's spatial dependence with the quadrature polynomial will lead to a more accurate finite element solution of the PDE.

As a final note: The discussion in the previous paragraphs shows that we here have a very concrete way of stating what we think of as a good mesh – it should be aligned with the jump in the coefficient. But one could also have asked this kind of question in a more general setting: Given some equation with a smooth solution and smooth coefficients, can we say what a good mesh would look like? This is a question for which the answer is easier to state in intuitive terms than mathematically: A good mesh has cells that all, by and large, look like squares (or cubes, in 3d). A bad mesh would contain cells that are very elongated in some directions or, more generally, for which there are cells that have both short and long edges. There are many ways in which one could assign a numerical quality index to each cell that measures whether the cell is "good" or "bad"; some of these are often chosen because they are cheap and easy to compute, whereas others are based on what enters into proofs of convergence. An example of the former would be the ratio of the longest to the shortest edge of a cell: In the ideal case, that ratio would be one; bad cells have values much larger than one. An example of the latter kind would consider the gradient (the "Jacobian") of the mapping from the reference cell $\hat K=[0,1]^d$ to the real cell $K$; this gradient is a matrix, and a quantity that enters into error estimates is the maximum over all points on the reference cell of the ratio of the largest to the smallest eigenvalue of this matrix. It is again not difficult to see that this ratio is constant if the cell $K$ is an affine image of $\hat K$, and that it is one for squares and cubes.

    In practice, it might be interesting to visualize such quality measures. The function GridTools::compute_aspect_ratio_of_cells() provides one way to get this kind of information. Even better, visualization tools such as VisIt often allow you to visualize this sort of information for a variety of measures from within the visualization software itself; in the case of VisIt, just add a "pseudo-color" plot and select one of the mesh quality measures instead of the solution field.
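As a usage sketch of the function named above, assuming an existing Triangulation<dim> called triangulation:

  const MappingQ<dim>  mapping(1);
  const QGauss<dim>    quadrature(2);
  const Vector<double> aspect_ratios =
    GridTools::compute_aspect_ratio_of_cells(mapping, triangulation, quadrature);
  // Entry i approximates the aspect ratio of active cell i; values near 1
  // indicate well-shaped cells, large values indicate elongated ones.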

    Playing with the regularity of the solution

    From a mathematical perspective, solutions of the Laplace equation

\[
  -\Delta u = f
\]

on smoothly bounded, convex domains are known to be smooth themselves. The exact degree of smoothness, i.e., the function space in which the solution lives, depends on how smooth exactly the boundary of the domain is, and how smooth the right hand side is. Some regularity of the solution may be lost at the boundary, but one generally has that the solution is twice more differentiable in compact subsets of the domain than the right hand side. If, in particular, the right hand side satisfies $f\in C^\infty(\Omega)$, then $u \in C^\infty(\Omega_i)$ where $\Omega_i$ is any compact subset of $\Omega$ ( $\Omega$ is an open domain, so a compact subset needs to keep a positive distance from $\partial\Omega$).

The situation we chose for the current example is different, however: we look at an equation with a non-constant coefficient $a(\mathbf x)$:

\[
  -\nabla \cdot (a \nabla u) = f.
\]

Here, if $a$ is not smooth, then the solution will not be smooth either, regardless of $f$. In particular, we expect that wherever $a$ is discontinuous along a line (or along a plane in 3d), the solution will have a kink. This is easy to see: if for example $f$ is continuous, then $f=-\nabla \cdot (a \nabla u)$ needs to be continuous. This means that $a \nabla u$ must be continuously differentiable (not have a kink). Consequently, if $a$ has a discontinuity, then $\nabla u$ must have an opposite discontinuity so that the two exactly cancel and their product yields a function without a discontinuity. But for $\nabla u$ to have a discontinuity, $u$ must have a kink. This is of course exactly what is happening in the current example, and easy to observe in the pictures of the solution.

In general, if the coefficient $a(\mathbf x)$ is discontinuous along a line in 2d, or a plane in 3d, then the solution may have a kink, but the gradient of the solution will not go to infinity. That means that the solution is at least still in the Sobolev space $W^{1,\infty}$ (i.e., roughly speaking, in the space of functions whose derivatives are bounded). On the other hand, we know that in the most extreme cases – i.e., where the domain has reentrant corners, the right hand side only satisfies $f\in H^{-1}$, or the coefficient $a$ is only in $L^\infty$ – all we can expect is that $u\in H^1$ (i.e., the Sobolev space of functions whose derivative is square integrable), a much larger space than $W^{1,\infty}$. It is not very difficult to create cases where the solution is in a space $H^{1+s}$ where we can get $s$ to become as small as we want. Such cases are often used to test adaptive finite element methods because the mesh will have to resolve the singularity that causes the solution to not be in $W^{1,\infty}$ any more.

The typical example one uses for this is called the Kellogg problem (referring to [Kel74]), which in the commonly used form has a coefficient $a(\mathbf x)$ that has different values in the four quadrants of the plane (or eight different values in the octants of ${\mathbb R}^3$). The exact degree of regularity (the $s$ in the index of the Sobolev space above) depends on the values of $a(\mathbf x)$ coming together at the origin, and by choosing the jumps large enough, the regularity of the solution can be made as close as desired to $H^1$.

    To implement something like this, one could replace the coefficient function by the following (shown here only for the 2d case):

template <int dim>
double coefficient (const Point<dim> &p)
{
  // Sketch of a checkerboard coefficient; the values 20 and 1 are
  // placeholders for two different values meeting at the origin.
  return (p[0] * p[1] >= 0 ? 20. : 1.);
}
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_60.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_60.html 2024-11-15 06:44:31.591690887 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_60.html 2024-11-15 06:44:31.591690887 +0000 @@ -145,24 +145,24 @@
    Note
    If you use this program as a basis for your own work, please consider citing it in your list of references. The initial version of this work was contributed to the deal.II project by the authors listed in the following citation: 10.5281/zenodo.1243280

    Introduction

    Non-matching grid constraints through distributed Lagrange multipliers

In this tutorial we consider the case of two domains, $\Omega$ in $R^{\text{spacedim}}$ and $\Gamma$ in $R^{\text{dim}}$, where $\Gamma$ is embedded in $\Omega$ ( $\Gamma \subseteq \Omega$). We want to solve a partial differential equation on $\Omega$, enforcing some conditions on the solution of the problem on the embedded domain $\Gamma$.

    There are two interesting scenarios:

• the geometrical dimension dim of the embedded domain $\Gamma$ is the same as that of the domain $\Omega$ (spacedim), that is, the spacedim-dimensional measure of $\Gamma$ is not zero, or
• the embedded domain $\Gamma$ has an intrinsic dimension dim which is smaller than that of $\Omega$ (spacedim), thus its spacedim-dimensional measure is zero; for example, it is a curve embedded in a two-dimensional domain, or a surface embedded in a three-dimensional domain.

In both cases we define the restriction operator $\gamma$ as the operator that, given a continuous function on $\Omega$, returns its (continuous) restriction on $\Gamma$, i.e.,

    \[
 \gamma : C^0(\Omega) \mapsto C^0(\Gamma), \quad \text{ s.t. } \gamma u = u|_{\Gamma} \in C^0(\Gamma),
 \quad \forall u \in C^0(\Omega).
 \]

It is well known that the operator $\gamma$ can be extended to a continuous operator on $H^1(\Omega)$, mapping functions in $H^1(\Omega)$ to functions in $H^1(\Gamma)$ when the intrinsic dimension of $\Gamma$ is the same as that of $\Omega$.

The same is true, with a less regular range space (namely $H^{1/2}(\Gamma)$), when the dimension of $\Gamma$ is one less than that of $\Omega$, and $\Gamma$ does not have a boundary. In this second case, the operator $\gamma$ is also known as the trace operator, and it is well defined for Lipschitz co-dimension one curves and surfaces $\Gamma$ embedded in $\Omega$ (see the Wikipedia article on the trace operator for further details).

    The co-dimension two case is a little more complicated, and in general it is not possible to construct a continuous trace operator, not even from $H^1(\Omega)$ to $L^2(\Gamma)$, when the dimension of $\Gamma$ is zero or one respectively in two and three dimensions.

In this tutorial program we're not interested in further details on $\gamma$: we take the extension $\gamma$ for granted, assuming that the dimension of the embedded domain (dim) is always either equal to or one less than the dimension of the embedding domain $\Omega$ (spacedim).

We are going to solve the following differential problem: given a sufficiently regular function $g$ on $\Gamma$, a forcing term $f \in L^2(\Omega)$ and a Dirichlet boundary condition $u_D$ on $\partial \Omega$, find the solution $u$ to

\begin{eqnarray*}
- \Delta u + \gamma^T \lambda &=& f  \text{ in } \Omega\\
\gamma u &=& g  \text{ in } \Gamma\\
u &=& u_D \text{ on } \partial\Omega.
\end{eqnarray*}

This is a constrained problem, where we are looking for a function $u$ that solves the Poisson equation and satisfies Dirichlet boundary conditions $u=u_D$ on $\partial \Omega$, subject to the constraint $\gamma u = g$ enforced through a Lagrange multiplier.

When $f=0$ this problem has a physical interpretation: harmonic functions, i.e., functions that satisfy the Laplace equation, can be thought of as the displacements of a membrane whose boundary values are prescribed. The current situation then corresponds to finding the shape of a membrane for which not only the displacement at the boundary, but also on $\Gamma$, is prescribed. For example, if $\Gamma$ is a closed curve in 2d space, then that would model a soap film that is held in place by a wire loop along $\partial \Omega$ as well as a second loop along $\Gamma$. In cases where $\Gamma$ is a whole area, you can think of this as a membrane that is stretched over an obstacle where $\Gamma$ is the contact area. (If the contact area is not known we have a different problem – called the "obstacle problem" – which is modeled in step-41.)

As a first example we study the case of zero Dirichlet boundary conditions on $\partial\Omega$. The same equations apply if we instead impose zero Neumann boundary conditions on $\partial\Omega$, or a mix of the two.

The variational formulation can be derived by introducing two infinite dimensional spaces $V(\Omega)$ and $Q^*(\Gamma)$, respectively for the solution $u$ and for the Lagrange multiplier $\lambda$.

    Multiplying the first equation by $v \in V(\Omega)$ and the second by $q \in
 Q(\Gamma)$, integrating by parts when possible, and exploiting the boundary conditions on $\partial\Omega$, we obtain the following variational problem:

    Given a sufficiently regular function $g$ on $\Gamma$, find the solution $u$ to

\begin{eqnarray*}
(\nabla u, \nabla v)_{\Omega} + (\lambda, \gamma v)_{\Gamma} &=& (f,v)_{\Omega} \qquad \forall v \in V(\Omega) \\
(\gamma u, q)_{\Gamma} &=& (g,q)_{\Gamma} \qquad \forall q \in Q(\Gamma),
\end{eqnarray*}

where $(\cdot, \cdot)_{\Omega}$ and $(\cdot, \cdot)_{\Gamma}$ represent, respectively, $L^2$ scalar products in $\Omega$ and in $\Gamma$.

Inspection of the variational formulation tells us that the space $V(\Omega)$ can be taken to be $H^1_0(\Omega)$. The space $Q(\Gamma)$, in the co-dimension zero case, should be taken as $H^1(\Gamma)$, while in the co-dimension one case should be taken as $H^{1/2}(\Gamma)$.

    The function $g$ should therefore be either in $H^1(\Gamma)$ (for the co-dimension zero case) or $H^{1/2}(\Gamma)$ (for the co-dimension one case). This leaves us with a Lagrange multiplier $\lambda$ in $Q^*(\Gamma)$, which is either $H^{-1}(\Gamma)$ or $H^{-1/2}(\Gamma)$.

There are two options for the discretization of the problem above. One could choose matching discretizations, where the Triangulation for $\Gamma$ is aligned with the Triangulation for $\Omega$, or one could choose to discretize the two domains in a completely independent way.

The first option is clearly more indicated for the simple problem we proposed above: it is sufficient to use a single Triangulation for $\Omega$ and then impose certain constraints depending on $\Gamma$. An example of this approach is studied in step-41, where the solution has to stay above an obstacle, and this is achieved by imposing constraints on $\Omega$.

To solve more complex problems, for example one where the domain $\Gamma$ is time dependent, the second option could be a more viable solution. Handling non-aligned meshes is complex by itself: to illustrate how it is done, we study a simple problem.

    The technique we describe here is presented in the literature using one of many names: the immersed finite element method, the fictitious boundary method, the distributed Lagrange multiplier method, and others. The main principle is that the discretization of the two grids and of the two finite element spaces are kept completely independent. This technique is particularly efficient for the simulation of fluid-structure interaction problems, where the configuration of the embedded structure is part of the problem itself, and one solves a (possibly non-linear) elastic problem to determine the (time dependent) configuration of $\Gamma$, and a (possibly non-linear) flow problem in $\Omega
 \setminus \Gamma$, plus coupling conditions on the interface between the fluid and the solid.

    In this tutorial program we keep things a little simpler, and we assume that the configuration of the embedded domain is given in one of two possible ways:

• as a deformation mapping $\psi: \Gamma_0 \mapsto \Gamma \subseteq \Omega$, defined on a continuous finite dimensional space on $\Gamma_0$ and representing, for any point $x \in \Gamma_0$, its coordinate $\psi(x)$ in $\Omega$;
• as a displacement mapping $\delta \psi(x) = \psi(x)-x$ for $x\in \Gamma_0$, representing for any point $x$ the displacement vector applied in order to deform $x$ to its actual configuration $\psi(x) = x +\delta\psi(x)$.

We define the embedded reference domain $\Gamma_0$ embedded_grid: on this triangulation we construct a finite dimensional space (embedded_configuration_dh) to describe either the deformation or the displacement through a FiniteElement system of FE_Q objects (embedded_configuration_fe). This finite dimensional space is used only to interpolate a user supplied function (embedded_configuration_function) representing either $\psi$ (if the parameter use_displacement is set to false) or $\delta\psi$ (if the parameter use_displacement is set to true).

The Lagrange multiplier $\lambda$ and the user supplied function $g$ are defined through another finite dimensional space embedded_dh and another FiniteElement embedded_fe, using the same reference domain. In order to take into account the deformation of the domain, either a MappingFEField or a MappingQEulerian object is initialized with the embedded_configuration vector.
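
For illustration, here is a minimal sketch of how a displacement-based configuration could be turned into such a mapping; the variable names mirror those used in this program, but the exact constructor arguments are an assumption of this sketch:

  // Assuming `embedded_configuration` holds the interpolated displacement
  // field (i.e., use_displacement == true):
  embedded_mapping =
    std::make_unique<MappingQEulerian<dim, Vector<double>, spacedim>>(
      embedded_configuration_fe->degree,
      *embedded_configuration_dh,
      embedded_configuration);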

    In the embedding space, a standard finite dimensional space space_dh is constructed on the embedding grid space_grid, using the FiniteElement space_fe, following almost verbatim the approach taken in step-6.

We represent the discretizations of the spaces $V$ and $Q$ with

\[
V_h(\Omega) = \text{span} \{v_i\}_{i=1}^n
\]

and

\[
Q_h(\Gamma) = \text{span} \{q_i\}_{i=1}^m
\]

respectively, where $n$ is the dimension of space_dh, and $m$ the dimension of embedded_dh.

    Once all the finite dimensional spaces are defined, the variational formulation of the problem above leaves us with the following finite dimensional system of equations:

\[
\begin{pmatrix}
K & C^T \\
C & 0
\end{pmatrix}
\begin{pmatrix}
u \\
\lambda
\end{pmatrix}
=
\begin{pmatrix}
F \\
G
\end{pmatrix}
\]

where

\begin{eqnarray*}
K_{ij} &\dealcoloneq& (\nabla v_j, \nabla v_i)_\Omega \qquad i,j=1,\dots,n \\
C_{\alpha j} &\dealcoloneq& (v_j, q_\alpha)_\Gamma \qquad j=1,\dots,n, \quad \alpha = 1,\dots, m \\
F_{i} &\dealcoloneq& (f, v_i)_\Omega \qquad i = 1,\dots,n \\
G_{\alpha} &\dealcoloneq& (g, q_\alpha)_\Gamma \qquad \alpha = 1,\dots, m.
\end{eqnarray*}

While the matrix $K$ is the standard stiffness matrix for the Poisson problem on $\Omega$, and the vector $G$ is a standard right-hand-side vector for a finite element problem with forcing term $g$ on $\Gamma$ (see, for example, step-3), the matrix $C$ or its transpose $C^T$ are non-standard since they couple information on two non-matching grids.

In particular, the integral that appears in the computation of a single entry of $C$ is computed on $\Gamma$. As usual in finite elements we split this integral into contributions from all cells of the triangulation used to discretize $\Gamma$, we transform the integral on $K$ to an integral on the reference element $\hat K$, where $F_{K}$ is the mapping from $\hat K$ to $K$, and compute the integral on $\hat K$ using a quadrature formula:

\[
C_{\alpha j} \dealcoloneq (v_j, q_\alpha)_\Gamma = \sum_{K\in \Gamma} \int_{\hat K}
\hat q_\alpha(\hat x) (v_j \circ F_{K})(\hat x) J_K(\hat x) \,\mathrm{d} \hat x =
\sum_{K\in \Gamma} \sum_{i=1}^{n_q} \big(\hat q_\alpha(\hat x_i) (v_j \circ F_{K})(\hat x_i) J_K(\hat x_i) w_i \big).
\]

Computing this sum is non-trivial because we have to evaluate $(v_j \circ F_{K})(\hat x_i)$. In general, if $\Gamma$ and $\Omega$ are not aligned, the point $F_{K}(\hat x_i)$ is completely arbitrary with respect to $\Omega$, and unless we figure out a way to interpolate all basis functions of $V_h(\Omega)$ at an arbitrary point of $\Omega$, we cannot compute the integral needed for an entry of the matrix $C$.

To evaluate $(v_j \circ F_{K}) (\hat x_i)$, the following steps need to be taken (as shown in the picture below):

• For a given cell $K$ in $\Gamma$, compute the real point $y_i \dealcoloneq F_{K} (\hat x_i)$, where $\hat x_i$ is one of the quadrature points used for the integral on $K \subseteq \Gamma$.
• Find the cell of $\Omega$ in which $y_i$ lies. We shall call this element $T$.
• To evaluate the basis function, use the inverse of the mapping $G_T$ that transforms the reference element $\hat T$ into the element $T$: $v_j(y_i) = \hat v_j \circ G^{-1}_{T} (y_i)$ (see the sketch after this list).
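
A minimal sketch of the last two steps for a single point y follows; the names cache (a GridTools::Cache built on the space triangulation), space_fe, y, and j are assumptions of this illustration. (The first step, computing $y = F_K(\hat x_i)$, is what FEValues::get_quadrature_points() provides.)

  // Find the cell T of Omega containing y, together with the reference
  // coordinates y_hat = G_T^{-1}(y) of y inside it:
  const auto [space_cell, y_hat] =
    GridTools::find_active_cell_around_point(cache, y);
  // Evaluate the j-th local basis function of V_h at y:
  const double vj_at_y = space_fe.shape_value(j, y_hat);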

    The problem we solve here is identical to step-4, with the difference that we impose some constraints on an embedded domain $\Gamma$. The tutorial is written in a dimension independent way, and in the results section we show how to vary both dim and spacedim.

    The tutorial is compiled for dim equal to one and spacedim equal to two. If you want to run the program in embedding dimension spacedim equal to three, you will most likely want to change the reference domain for $\Gamma$ to be, for example, something you read from file, or a closed sphere that you later deform to something more interesting.

    In the default scenario, $\Gamma$ has co-dimension one, and this tutorial program implements the Fictitious Boundary Method. As it turns out, the same techniques are used in the Variational Immersed Finite Element Method, and the coupling operator $C$ defined above is the same in almost all of these non-matching methods.

The embedded domain is assumed to be included in $\Omega$, which we take as the unit square $[0,1]^2$. The definition of the fictitious domain $\Gamma$ can be modified through the parameter file, and can be given as a mapping from the reference interval $[0,1]$ to a curve in $\Omega$.

If the curve is closed, then the results will be similar to running the same problem on a grid whose boundary is $\Gamma$. The program will happily run also with a non-closed $\Gamma$, although in those cases the mathematical formulation of the problem is more difficult, since $\Gamma$ will have a boundary by itself that has co-dimension two with respect to the domain $\Omega$.

    DistributedLagrangeProblem

In the DistributedLagrangeProblem, we need two parameters describing the dimensions of the domain $\Gamma$ (dim) and of the domain $\Omega$ (spacedim).

These will be used to initialize a Triangulation<dim,spacedim> (for $\Gamma$) and a Triangulation<spacedim,spacedim> (for $\Omega$).

A novelty with respect to other tutorial programs is the heavy use of std::unique_ptr. These behave like classical pointers, with the advantage of doing automatic house-keeping: the contained object is automatically destroyed as soon as the unique_ptr goes out of scope, even if it is inside a container or an exception is thrown. Moreover, it does not allow for duplicate pointers, which prevents ownership problems. We do this because we want to be able to i) construct the problem, ii) read the parameters, and iii) initialize all objects according to what is specified in a parameter file.

    We construct the parameters of our problem in the internal class Parameters, derived from ParameterAcceptor. The DistributedLagrangeProblem class takes a const reference to a Parameters object, so that it is not possible to modify the parameters from within the DistributedLagrangeProblem class itself.

We could have initialized the parameters first, and then passed the parameters to the DistributedLagrangeProblem assuming all entries are set to the desired values, but this has two disadvantages:

     

    The parameters now described can all be set externally using a parameter file: if no parameter file is present when running the executable, the program will create a "parameters.prm" file with the default values defined here, and then abort to give the user a chance to modify the parameters.prm file.
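
A minimal sketch of the call that triggers this mechanism, assuming the parameter classes have already been constructed as in this program (the output file name here is an assumption):

  // Reads "parameters.prm" if it exists; otherwise writes the default
  // values to it so the user can edit them:
  ParameterAcceptor::initialize("parameters.prm", "used_parameters.prm");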

Initial refinement for the embedding grid, corresponding to the domain $\Omega$.

      unsigned int initial_refinement = 4;
     
The interaction between the embedded grid $\Gamma$ and the embedding grid $\Omega$ is handled through the computation of $C$, which involves all cells of $\Omega$ overlapping with parts of $\Gamma$: a higher refinement of such cells might improve the quality of our computations. For this reason we define delta_refinement: if it is greater than zero, then we mark each cell of the space grid that contains a vertex of the embedded grid and its neighbors, execute the refinement, and repeat this process delta_refinement times.

      unsigned int delta_refinement = 3;
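
A sketch of this refinement cycle follows; the names support_points and space_grid_tools_cache are assumptions at this point (in the real program the cache and the support point locations are kept up to date between rounds):

  for (unsigned int i = 0; i < parameters.delta_refinement; ++i)
    {
      // Mark every space cell that contains a support point of the
      // embedded grid, together with its neighbors:
      const auto point_locations =
        GridTools::compute_point_locations(*space_grid_tools_cache,
                                           support_points);
      const auto &cells = std::get<0>(point_locations);
      for (auto &cell : cells)
        {
          cell->set_refine_flag();
          for (const auto f : cell->face_indices())
            if (!cell->at_boundary(f))
              cell->neighbor(f)->set_refine_flag();
        }
      space_grid->execute_coarsening_and_refinement();
    }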
     

    Starting refinement of the embedded grid, corresponding to the domain $\Gamma$.

      unsigned int initial_embedded_refinement = 8;
     
The list of boundary ids where we impose (possibly inhomogeneous) Dirichlet boundary conditions. On the remaining boundary ids (if any), we impose homogeneous Neumann boundary conditions. As a default problem we have zero Dirichlet boundary conditions on $\partial \Omega$.

      std::list<types::boundary_id> dirichlet_ids{0, 1, 2, 3};
     

    FiniteElement degree of the embedding space: $V_h(\Omega)$

      std::unique_ptr<Mapping<dim, spacedim>> embedded_mapping;
     
We do the same thing to specify the value of the forcing term $f$. In this case the Function is a scalar one.

      embedding_rhs_function;
     
      {

    Here is a way to set default values for a ParameterAcceptor class that was constructed using ParameterAcceptorProxy.

In this case, we set the default deformation of the embedded grid to be a circle with radius $R$ and center $(Cx, Cy)$, we set the default value for the embedded_value_function to be the constant one, and specify some sensible values for the SolverControl object.

It is fundamental for $\Gamma$ to be embedded: from the definition of $C_{\alpha j}$ it is clear that, if $\Gamma \not\subseteq \Omega$, certain rows of the matrix $C$ will be zero. This would be a problem, as the Schur complement method requires $C$ to have full column rank.

      embedded_configuration_function.declare_parameters_call_back.connect(
      []() -> void {
      TimerOutput::Scope timer_section(monitor, "Setup grids and dofs");
     
Initializing $\Omega$: constructing the Triangulation and wrapping it into a std::unique_ptr object

      space_grid = std::make_unique<Triangulation<spacedim>>();
     

    Next, we actually create the triangulation using GridGenerator::hyper_cube(). The last argument is set to true: this activates colorization (i.e., assigning different boundary indicators to different parts of the boundary), which we use to assign the Dirichlet and Neumann conditions.
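
A sketch of the call just described; the unit-square bounds match the embedding domain used in this program:

  GridGenerator::hyper_cube(*space_grid, 0., 1., true); // true: colorize boundary ids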

     
      setup_embedded_dofs();
     
In this tutorial program we not only refine $\Omega$ globally, but also allow a local refinement depending on the position of $\Gamma$, according to the value of parameters.delta_refinement, which we use to decide how many rounds of local refinement we should do on $\Omega$, corresponding to the position of $\Gamma$.

With the mapping in place, it is now possible to query the locations of all support points associated with the embedded_dh, by calling the method DoFTools::map_dofs_to_support_points().

This method has two variants: one that takes a Mapping and one that does not. If you use the variant that takes a Mapping, as we do here, the support points are computed through the specified mapping, which can manipulate them accordingly.

    This is precisely what the embedded_mapping is there for.
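
A minimal sketch of this query, assuming embedded_dh and embedded_mapping as declared in this program:

  std::vector<Point<spacedim>> support_points(embedded_dh->n_dofs());
  // With the mapping variant, the returned locations reflect the current
  // (deformed) configuration of Gamma:
  DoFTools::map_dofs_to_support_points(*embedded_mapping,
                                       *embedded_dh,
                                       support_points);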

      }
     
In order to construct a well posed coupling interpolation operator $C$, there are some constraints on the relative dimension of the grids between the embedding and the embedded domains. The coupling operator $C$ and the spaces $V$ and $Q$ have to satisfy an inf-sup condition in order for the problem to have a solution. It turns out that the non-matching $L^2$ projection satisfies such an inf-sup condition, provided that the spaces $V$ and $Q$ are compatible with each other (for example, provided that they are chosen to be the ones described in the introduction).

/usr/share/doc/packages/dealii/doxygen/deal.II/step_61.html differs (HTML document, UTF-8 Unicode text, with very long lines)

\begin{align*}
\nabla \cdot \left( -\mathbf{K} \nabla p \right) &= f \qquad \mathbf{x} \in \Omega, \\
p &= p_D \qquad \mathbf{x} \in \Gamma^D, \\
\mathbf{u} \cdot \mathbf{n} &= u_N \qquad \mathbf{x} \in \Gamma^N,
\end{align*}

where $\Omega \subset \mathbb{R}^n (n=2,3)$ is a bounded domain. In the context of the flow of a fluid through a porous medium, $p$ is the pressure, $\mathbf{K}$ is a permeability tensor, $f$ is the source term, and $p_D, u_N$ represent Dirichlet and Neumann boundary conditions. We can introduce a flux, $\mathbf{u} = -\mathbf{K} \nabla p$, that corresponds to the Darcy velocity (in the way we did in step-20) and this variable will be important in the considerations below.

In this program, we will consider a test case where the exact pressure is $p = \sin \left( \pi x\right)\sin\left(\pi y \right)$ on the unit square domain, with homogeneous Dirichlet boundary conditions and $\mathbf{K}$ the identity matrix. Then we will calculate $L_2$ errors of pressure, velocity, and flux.

    Weak Galerkin scheme

The Poisson equation above has a solution $p$ that needs to satisfy the weak formulation of the problem,

    \begin{equation*}
 \mathcal{A}\left(p,q \right) = \mathcal{F} \left(q \right),
 \end{equation*}

for all test functions $q$, where

\begin{equation*}
\mathcal{A}\left(p,q\right)
  \dealcoloneq \int_\Omega \left(\mathbf{K} \nabla p\right) \cdot \nabla q \;\mathrm{d}x,
\end{equation*}

and

\begin{equation*}
\mathcal{F}\left(q\right)
  \dealcoloneq \int_\Omega f q \;\mathrm{d}x
  - \int_{\Gamma^N} u_N q \; \mathrm{d}x.
\end{equation*}

Here, we have integrated by parts in the bilinear form, and we are evaluating the gradients of $p$ and $q$ in the interior and the values of $q$ on the boundary of the domain. All of this is well defined because we assume that the solution is in $H^1$, for which taking the gradient and evaluating boundary values are valid operations.

The idea of the weak Galerkin method is now to approximate the exact $p$ solution with a discontinuous function $p_h$. This function may only be discontinuous along interfaces between cells, and because we will want to evaluate this function also along interfaces, we have to prescribe not only what values it is supposed to have in the cell interiors but also its values along interfaces. We do this by saying that $p_h$ is actually a tuple, $p_h=(p^\circ,p^\partial)$, though it's really just a single function that is either equal to $p^\circ(x)$ or $p^\partial(x)$, depending on whether it is evaluated at a point $x$ that lies in the cell interior or on cell interfaces.
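
In deal.II such a tuple can be represented by pairing a cell-interior element with a face element inside an FESystem. The following is only a sketch of such a pair; the concrete element used by the program is set up in the code further below:

  // Interior values from FE_DGQ, interface values from FE_FaceQ,
  // for a hypothetical polynomial degree `degree`:
  FESystem<dim> fe(FE_DGQ<dim>(degree), 1, FE_FaceQ<dim>(degree), 1);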

    We would then like to simply stick this approximation into the bilinear form above. This works for the case where we have to evaluate the test function $q_h$ on the boundary (where we would simply take its interface part $q_h^\partial$) but we have to be careful with the gradient because that is only defined in cell interiors. Consequently, the weak Galerkin scheme for the Poisson equation is defined by

\begin{equation*}
\mathcal{A}_h\left(p_h,q_h \right) = \mathcal{F} \left(q_h \right),
\end{equation*}

where $\mathcal{A}_h$ is built from the "discrete weak gradient" $\nabla_{w,d}$ instead of the usual gradient. As usual, we expand the numerical solution in terms of basis functions,

\begin{equation*}
  p_h(\mathbf x) = \sum_j P_j \varphi_j(\mathbf x).
\end{equation*}

Here, since $p_h$ has two components (the interior and the interface components), the same must hold true for the basis functions $\varphi_j(\mathbf x)$, which we can write as $\varphi_j = (\varphi_j^\circ,\varphi_j^\partial)$. If you've followed the descriptions in step-8, step-20, and the documentation topic on vector-valued problems, it will be no surprise that for some values of $j$, $\varphi_j^\circ$ will be zero, whereas for other values of $j$, $\varphi_j^\partial$ will be zero – i.e., shape functions will be of either one or the other kind. That is not important, here, however. What is important is that we need to wonder how we can represent $\nabla_{w,d} \varphi_j$ because that is clearly what will appear in the problem when we want to implement the bilinear form

\begin{equation*}
\mathcal{A}_h\left(p_h,q_h\right)
  = \sum_{K \in \mathbb{T}}
    \int_K \left(\mathbf{K} \nabla_{w,d} p_h\right) \cdot \nabla_{w,d} q_h \;\mathrm{d}x.
\end{equation*}

Expanding the discrete weak gradient of each basis function in the Raviart-Thomas basis on every cell $K$ and testing against the basis functions $\mathbf v_k$ leads to the relation

\begin{equation*}
  \left(C^K\right)^T = \left(M^K\right)^{-1} G^K.
\end{equation*}

(In this last step, we have assumed that the indices $i,j,k$ only range over those degrees of freedom active on cell $K$, thereby ensuring that the mass matrix on the space $RT_s(K)$ is invertible.) Equivalently, using the symmetry of the matrix $M$, we have that

\begin{equation*}
  C^K = \left(G^K\right)^{T} \left(M^K\right)^{-1}.
\end{equation*}

In other words, on each cell $K$ the discrete weak gradient of every basis function can be expanded in the Raviart-Thomas basis,

\begin{equation*}
  \nabla_{w,d} \varphi_i|_K = \sum_l C^K_{il} \mathbf v_l|_K.
\end{equation*}

So, if we have the matrix $C^K$ for each cell $K$, then we can easily compute the contribution $A^K$ for cell $K$ to the matrix $A$ as follows:

\begin{equation*}
  A^K_{ij} =
    \sum_k \sum_l C_{ik}^K C_{jl}^K
    \int_K \left(\mathbf K \mathbf v_k|_K\right) \cdot
    \mathbf v_l|_K,
\end{equation*}

where the integrals on the right are the entries of the matrix

\begin{equation*}
  H^K_{kl} \dealcoloneq \int_K \left(\mathbf K \mathbf v_k|_K\right) \cdot \mathbf v_l|_K,
\end{equation*}

which is really just the mass matrix on cell $K$ using the Raviart-Thomas basis and weighting by the permeability tensor $\mathbf K$. The derivation here then shows that the weak Galerkin method really just requires us to compute these $C^K$ and $H^K$ matrices on each cell $K$, and then $A^K = C^K H^K (C^K)^T$, which is easily computed. The code to be shown below does exactly this.

Having so computed the contribution $A^K$ of cell $K$ to the global matrix, all we have to do is to "distribute" these local contributions into the global matrix. How this is done is first shown in step-3 and step-4. In the current program, this will be facilitated by calling AffineConstraints::distribute_local_to_global().

A linear system of course also needs a right hand side. There is no difficulty associated with computing the right hand side here other than the fact that we only need to use the cell-interior part $\varphi_i^\circ$ for each shape function $\varphi_i$.
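
Putting the last few paragraphs together, a minimal sketch of the end of the assembly loop could look as follows, assuming cell_matrix_C and cell_matrix_H already hold $C^K$ and $H^K$ and the remaining names follow the usual deal.II conventions:

  // A^K = C^K H^K (C^K)^T, computed via a scratch matrix:
  FullMatrix<double> tmp(dofs_per_cell, n_rt_dofs); // hypothetical sizes
  cell_matrix_C.mmult(tmp, cell_matrix_H);          // tmp = C^K H^K
  tmp.mTmult(local_matrix, cell_matrix_C);          // local_matrix = tmp (C^K)^T
  // ... and distribute the local contributions into the global objects:
  constraints.distribute_local_to_global(
    local_matrix, local_rhs, local_dof_indices, system_matrix, system_rhs);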

    Post-processing and L2-errors

    The discussions in the previous sections have given us a linear system that we can solve for the numerical pressure $p_h$. We can use this to compute an approximation to the variable $\mathbf u = -{\mathbf K}\nabla p$ that corresponds to the velocity with which the medium flows in a porous medium if this is the model we are trying to solve. This kind of step – computing a derived quantity from the solution of the discrete problem – is typically called "post-processing".

    Here, instead of using the exact gradient of $p_h$, let us instead use the discrete weak gradient of $p_h$ to calculate the velocity on each element. As discussed above, on each element the gradient of the numerical pressure $\nabla p$ can be approximated by discrete weak gradients $ \nabla_{w,d}\phi_i$:

\begin{align*}
  \nabla_{w,d} p_h
  = \sum_{i} P_i \nabla_{w,d}\phi_i
  = \sum_{i} \sum_{j} P_i C^K_{ij} \mathbf{v}_j,
\end{align*}

    where $C^K$ is the expansion matrix from above, and $\mathbf{v}_j$ is the basis function of the $RT$ space on a cell.

Unfortunately, $\mathbf{K} \mathbf{v}_j$ may not be in the $RT$ space (unless, of course, if $\mathbf K$ is constant times the identity matrix). So, in order to represent it in a finite element program, we need to project it back into a finite dimensional space we can work with. Here, we will use the $L_2$-projection to project it back to the (broken) $RT$ space.

We define the projection as $ \mathbf{Q}_h \left( \mathbf{K}\mathbf{v}_j \right) = \sum_{k} d_{jk}\mathbf{v}_k$ on each cell $K$. For any $j$, $\left( \mathbf{Q}_h \left( \mathbf{Kv}_j \right),\mathbf{v}_k \right)_K = \left( \mathbf{Kv}_j,\mathbf{v}_k \right)_K.$ So, rather than the formula shown above, the numerical velocity on cell $K$ instead becomes

\begin{equation*}
\mathbf{u}_h = \mathbf{Q}_h \left( -\mathbf{K}\nabla_{w,d}p_h \right) =
-\sum_{i} \sum_{j} \sum_{k} P_i C^K_{ij} d_{jk} \mathbf{v}_k,
\end{equation*}

    where $-\sum_{j} \sum_{i} P_ic_{ij}d_{jk}$ is called cell_velocity in the code.

Using this velocity obtained by "postprocessing" the solution, we can define the $L_2$-errors of pressure, velocity, and flux by the following formulas:

\begin{align*}
\|p-p_h^\circ\|^2
  &= \sum_{K \in \mathbb{T}} \|p-p_h^\circ\|_{L_2(K)}^2, \\
\|\mathbf{u}-\mathbf{u}_h\|^2
  &= \sum_{K \in \mathbb{T}} \|\mathbf{u}-\mathbf{u}_h\|_{L_2(K)}^2, \\
\|(\mathbf{u}-\mathbf{u}_h) \cdot \mathbf{n}\|^2
  &= \sum_{K \in \mathbb{T}} \sum_{\gamma \subset \partial K}
    \frac{|K|}{|\gamma|} \|\mathbf{u} \cdot \mathbf{n} - \mathbf{u}_h \cdot \mathbf{n}\|_{L_2(\gamma)}^2,
\end{align*}

where $| K |$ is the area of the element, $\gamma$ are faces of the element, $\mathbf{n}$ are unit normal vectors of each face. The last of these norms measures the accuracy of the normal component of the velocity vectors over the interfaces between the cells of the mesh. The scaling factor $|K|/|\gamma|$ is chosen so as to scale out the difference in the length (or area) of the collection of interfaces as the mesh size changes.

    The first of these errors above is easily computed using VectorTools::integrate_difference. The others require a bit more work and are implemented in the code below.

    The commented program

    Include files

     

    The WGDarcyEquation class template

This is the main class of this program. We will solve for the numerical pressure in the interior and on faces using the weak Galerkin (WG) method, and calculate the $L_2$ error of pressure. In the post-processing step, we will also calculate $L_2$-errors of the velocity and flux.

    The structure of the class is not fundamentally different from that of previous tutorial programs, so there is little need to comment on the details with one exception: The class has a member variable fe_dgrt that corresponds to the "broken" Raviart-Thomas space mentioned in the introduction. There is a matching dof_handler_dgrt that represents a global enumeration of a finite element field created from this element, and a vector darcy_velocity that holds nodal values for this field. We will use these three variables after solving for the pressure to compute a postprocessed velocity field for which we can then evaluate the error and which we can output for visualization.

      template <int dim>
      class WGDarcyEquation

    Right hand side, boundary values, and exact solution

Next, we define the coefficient matrix $\mathbf{K}$ (here, the identity matrix), Dirichlet boundary conditions, the right-hand side $f = 2\pi^2 \sin(\pi x) \sin(\pi y)$, and the exact solution that corresponds to these choices for $K$ and $f$, namely $p = \sin(\pi x) \sin(\pi y)$.

      template <int dim>
      class Coefficient : public TensorFunction<2, dim>

    cell_matrix_C is then the matrix product between the transpose of $G^K$ and the inverse of the mass matrix (where this inverse is stored in cell_matrix_M):

      cell_matrix_G.Tmmult(cell_matrix_C, cell_matrix_M);
     
Finally we can compute the local matrix $A^K$. Element $A^K_{ij}$ is given by $\int_{E} \sum_{k,l} C_{ik} C_{jl} (\mathbf{K} \mathbf{v}_k) \cdot \mathbf{v}_l \,\mathrm{d}x$. We have calculated the coefficients $C$ in the previous step, and so obtain the following after suitably re-arranging the loops:

      local_matrix = 0;
      const FEValuesExtractors::Scalar pressure_interior(0);
      const FEValuesExtractors::Scalar pressure_face(1);
     
In the introduction, we explained how to calculate the numerical velocity on the cell. We need the pressure solution values on each cell, the coefficients of the Gram matrix, and the coefficients of the $L_2$ projection. We have already calculated the global solution, so we will extract the cell solution from the global solution. The coefficients of the Gram matrix were calculated when we assembled the system matrix for the pressures, and we will do the same here. For the coefficients of the projection, we do a matrix multiplication, i.e., the inverse of the Gram matrix times the matrix with $(\mathbf{K} \mathbf{w}, \mathbf{w})$ as components. Then, we multiply all these coefficients and call them beta. The numerical velocity is the product of beta and the basis functions of the Raviart-Thomas space.

      cell = dof_handler.begin_active(),
      endc = dof_handler.end(), cell_dgrt = dof_handler_dgrt.begin_active();
      }
      }
     
To compute the matrix $D$ mentioned in the introduction, we then need to evaluate $D=M^{-1}E$ as explained in the introduction:

      cell_matrix_M.gauss_jordan();
      cell_matrix_M.mmult(cell_matrix_D, cell_matrix_E);
     
     
     

    WGDarcyEquation<dim>::compute_pressure_error

This part is to calculate the $L_2$ error of the pressure. We define a vector that holds the norm of the error on each cell. Next, we use VectorTools::integrate_difference() to compute the error in the $L_2$ norm on each cell. However, we really only care about the error in the interior component of the solution vector (we can't even evaluate the interface pressures at the quadrature points, because these are all located in the interior of cells) and consequently have to use a weight function that ensures that the interface component of the solution variable is ignored. This is done by using the ComponentSelectFunction whose arguments indicate which component we want to select (component zero, i.e., the interior pressures) and how many components there are in total (two).
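
As a sketch of what this looks like, assuming exact_pressure is a two-component Function describing the exact interior and interface pressures:

  // Select component 0 (interior pressure) out of the 2 components:
  const ComponentSelectFunction<dim> select_interior_pressure(0, 2);
  Vector<float> difference_per_cell(triangulation.n_active_cells());
  VectorTools::integrate_difference(dof_handler,
                                    solution,
                                    exact_pressure,
                                    difference_per_cell,
                                    QGauss<dim>(fe.degree + 2),
                                    VectorTools::L2_norm,
                                    &select_interior_pressure);
  const double L2_error = VectorTools::compute_global_error(
    triangulation, difference_per_cell, VectorTools::L2_norm);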

      template <int dim>
      void WGDarcyEquation<dim>::compute_pressure_error()
      {

WGDarcyEquation<dim>::compute_velocity_errors

In this function, we evaluate $L_2$ errors for the velocity on each cell, and $L_2$ errors for the flux on faces. The function relies on compute_postprocessed_velocity() having been called before; that function computes the velocity field based on the pressure solution that has previously been computed.

    We are going to evaluate velocities on each cell and calculate the difference between numerical and exact velocities.

      template <int dim>
      void WGDarcyEquation<dim>::compute_velocity_errors()
      {
      fe_values_dgrt.reinit(cell_dgrt);
     
First compute the $L_2$ error between the postprocessed velocity field and the exact one:

      fe_values_dgrt[velocities].get_function_values(darcy_velocity,
      velocity_values);
      double L2_err_velocity_cell_sqr_local = 0;
      }
      L2_err_velocity_cell_sqr_global += L2_err_velocity_cell_sqr_local;
     
For reconstructing the flux we need the size of cells and faces. Since fluxes are calculated on faces, we loop over all four faces of each cell. To calculate the face velocity, we extract values at the quadrature points from the darcy_velocity which we have computed previously. Then, we calculate the squared velocity error in the normal direction. Finally, we calculate the $L_2$ flux error on the cell by appropriately scaling with face and cell areas and add it to the global error.

      const double cell_area = cell_dgrt->measure();
      for (const auto &face_dgrt : cell_dgrt->face_iterators())
      {
      }
      }
     
After adding up errors over all cells and faces, we take the square root and get the $L_2$ errors of velocity and flux. These we output to screen.

      const double L2_err_velocity_cell =
      std::sqrt(L2_err_velocity_cell_sqr_global);
      const double L2_err_flux_face = std::sqrt(L2_err_flux_sqr);
      return 0;
      }

    Results

We run the program with a right hand side that will produce the solution $p = \sin(\pi x) \sin(\pi y)$ and with homogeneous Dirichlet boundary conditions in the domain $\Omega = (0,1)^2$. In addition, we choose the coefficient matrix in the differential operator $\mathbf{K}$ as the identity matrix. We test this setup using $\mbox{WG}(Q_0,Q_0;RT_{[0]})$, $\mbox{WG}(Q_1,Q_1;RT_{[1]})$ and $\mbox{WG}(Q_2,Q_2;RT_{[2]})$ element combinations, which one can select by using the appropriate constructor argument for the WGDarcyEquation object in main(). We will then visualize pressure values in interiors of cells and on faces. We want to see that the pressure maximum is around 1 and the minimum is around 0. With mesh refinement, the convergence rates of pressure, velocity and flux should then be around 1 for $\mbox{WG}(Q_0,Q_0;RT_{[0]})$, 2 for $\mbox{WG}(Q_1,Q_1;RT_{[1]})$, and 3 for $\mbox{WG}(Q_2,Q_2;RT_{[2]})$.

    Test results on WG(Q0,Q0;RT[0])

/usr/share/doc/packages/dealii/doxygen/deal.II/step_62.html differs (HTML document, UTF-8 Unicode text, with very long lines)
    Note
    As a prerequisite of this program, you need to have HDF5, complex PETSc, and the p4est libraries installed. The installation of deal.II together with these additional libraries is described in the README file.

    Introduction

    A phononic crystal is a periodic nanostructure that modifies the motion of mechanical vibrations or phonons. Phononic structures can be used to disperse, route and confine mechanical vibrations. These structures have potential applications in quantum information and have been used to study macroscopic quantum phenomena. Phononic crystals are usually fabricated in cleanrooms.

In this tutorial we show how to design a phononic superlattice cavity, which is a particular type of phononic crystal that can be used to confine mechanical vibrations. A phononic superlattice cavity is formed by two Distributed Bragg Reflector (DBR) mirrors and a $\lambda/2$ cavity, where $\lambda$ is the acoustic wavelength. Acoustic DBRs are periodic structures where a set of bilayer stacks with contrasting physical properties (sound velocity index) is repeated $N$ times. Superlattice cavities are usually grown on a Gallium Arsenide wafer by Molecular Beam Epitaxy. The bilayers correspond to GaAs/AlAs mirror pairs. As shown below, the thickness of the mirror layers (brown and green) is $\lambda/4$ and the thickness of the cavity (blue) is $\lambda/2$.

    Phononic superlattice cavity

    In this tutorial we calculate the band gap and the mechanical resonance of a phononic superlattice cavity but the code presented here can be easily used to design and calculate other types of phononic crystals.

The device is a waveguide in which the wave goes from left to right. The simulations of this tutorial are done in 2D, but the code is dimension independent and can be easily used with 3D simulations. The waveguide width is equal to the $y$ dimension of the domain and the waveguide length is equal to the $x$ dimension of the domain. There are two regimes that depend on the waveguide width:

      • Single mode: In this case the width of the structure is much smaller than the wavelength. This case can be solved either with FEM (the approach that we take here) or with a simple semi-analytical 1D transfer matrix formalism.
      • Multimode: In this case the width of the structure is larger than the wavelength. This case can be solved using FEM or with a scattering matrix formalism. Although we do not study this case in this tutorial, it is very easy to reach the multimode regime by increasing the parameter waveguide width (dimension_y in the jupyter notebook).
@@ -214,7 +214,7 @@
\[
 \varepsilon_{kl} = \frac{1}{2}\left(\frac{1}{s_k}\partial_k u_l
 + \frac{1}{s_l}\partial_l u_k\right)
\]

where summation over repeated indices (here $n$, as well as $k$ and $l$) is as always implied. Note that the strain is no longer symmetric after applying the complex coordinate stretching of the PML. This set of equations can be written as

\[
 -\omega^2\rho \xi  u_m - \partial_n \left(\frac{\xi c_{mnkl}}{2s_n s_k} \partial_k u_l
 + \frac{\xi c_{mnkl}}{2s_n s_l} \partial_l u_k\right) = f_m
\]
@@ -226,7 +226,7 @@
\[
 -\omega^2\rho \xi  u_m - \partial_n \left(\alpha_{mnkl}\partial_k u_l
 +  \beta_{mnkl}\partial_l u_k\right) = f_m
\]

We can multiply by $\varphi_m$, integrate over the domain $\Omega$, and integrate by parts.

\begin{eqnarray*}
 -\omega^2\int_\Omega\rho\xi\varphi_m u_m + \int_\Omega\partial_n\varphi_m \left(\frac{\xi c_{mnkl}}{2s_n s_k} \partial_k u_l
 + \frac{\xi c_{mnkl}}{2s_n s_l} \partial_l u_k\right) = \int_\Omega\varphi_m f_m
\end{eqnarray*}
@@ -404,7 +404,7 @@
  const Point<dim> force_center;

       
        public:
In this particular simulation the force has only an $x$ component, $F_y=0$.

        const unsigned int force_component = 0;
        };
       
      @@ -525,7 +525,7 @@
       

      The get_stiffness_tensor() function

This function returns the stiffness tensor of the material. For the sake of simplicity we consider the stiffness to be isotropic and homogeneous; only the density $\rho$ depends on the position. As we have previously shown in step-8, if the stiffness is isotropic and homogeneous, the stiffness coefficients $c_{ijkl}$ can be expressed as a function of the two coefficients $\lambda$ and $\mu$. The coefficient tensor reduces to

\[
   c_{ijkl}
   =
   \lambda \delta_{ij}\delta_{kl} +
   \mu \left(\delta_{ik}\delta_{jl} + \delta_{il}\delta_{jk}\right)
\]
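Putting the formula above into code, a compact sketch of what a function like get_stiffness_tensor() can look like is the following (an illustration of the formula using the deal.II SymmetricTensor class that the program stores the result in, not necessarily the tutorial's verbatim code):

  template <int dim>
  SymmetricTensor<4, dim> get_stiffness_tensor(const double lambda,
                                               const double mu)
  {
    SymmetricTensor<4, dim> stiffness;
    for (unsigned int i = 0; i < dim; ++i)
      for (unsigned int j = 0; j < dim; ++j)
        for (unsigned int k = 0; k < dim; ++k)
          for (unsigned int l = 0; l < dim; ++l)
            // c_ijkl = lambda d_ij d_kl + mu (d_ik d_jl + d_il d_jk)
            stiffness[i][j][k][l] =
              ((i == j) && (k == l) ? lambda : 0.) +
              ((i == k) && (j == l) ? mu : 0.) +
              ((i == l) && (j == k) ? mu : 0.);
    return stiffness;
  }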
@@ -668,7 +668,7 @@
\begin{align*}
 F_x &=
 \left\{
 \begin{array}{ll}
   a \exp\left(-\frac{(x-x_0)^2}{2\sigma_x^2} - \frac{(y-y_0)^2}{2\sigma_y^2}\right)
     & \text{if } x_\textrm{min}<x<x_\textrm{max} \text{ and } y_\textrm{min}<y<y_\textrm{max} \\
   0 & \text{otherwise}
 \end{array}
 \right.\\ F_y &= 0
 \end{align*}

where $a$ is the maximum amplitude the force takes, $(x_0,y_0)$ is the center of the Gaussian pulse (the force center declared above), and $\sigma_x$ and $\sigma_y$ are the standard deviations for the $x$ and $y$ components. Note that the pulse has been cropped to $x_\textrm{min}<x<x_\textrm{max}$ and $y_\textrm{min} <y<y_\textrm{max}$.

        template <int dim>
        double RightHandSide<dim>::value(const Point<dim> &p,
        const unsigned int component) const
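The body of this function is largely elided in the diff above; a hedged sketch of the logic it has to implement follows, in which the member names (max_force_amplitude, force_sigma_x, force_sigma_y, force_x_min and friends) are illustrative placeholders rather than the tutorial's exact identifiers:

  {
    // Return the cropped Gaussian pulse for the x component, zero otherwise:
    if ((component == force_component) &&
        (p[0] > force_x_min) && (p[0] < force_x_max) &&
        (p[1] > force_y_min) && (p[1] < force_y_max))
      return max_force_amplitude *
             std::exp(-(std::pow(p[0] - force_center[0], 2) /
                          (2 * std::pow(force_sigma_x, 2)) +
                        std::pow(p[1] - force_center[1], 2) /
                          (2 * std::pow(force_sigma_y, 2))));
    else
      return 0.;
  }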
      @@ -763,7 +763,7 @@

      The Rho class implementation

This class is used to define the mass density. As we have explained before, a phononic superlattice cavity is formed by two Distributed Bragg Reflector (DBR) mirrors and a $\lambda/2$ cavity where $\lambda$ is the acoustic wavelength. Acoustic DBRs are periodic structures where a set of bilayer stacks with contrasting physical properties (sound velocity index) is repeated $N$ times. The change in the wave velocity is generated by alternating layers with different density.

        template <int dim>
        Rho<dim>::Rho(HDF5::Group &data)
        : Function<dim>(1)
      @@ -1069,7 +1069,7 @@
        std::vector<Vector<std::complex<double>>> pml_values(
        n_q_points, Vector<std::complex<double>>(dim));
       
We calculate the stiffness tensor for the $\lambda$ and $\mu$ that have been defined in the jupyter notebook. Note that contrary to $\rho$ the stiffness is constant throughout the whole domain.

        const SymmetricTensor<4, dim> stiffness_tensor =
        get_stiffness_tensor<dim>(parameters.lambda, parameters.mu);
       
      @@ -1110,7 +1110,7 @@
        QuadratureCache<dim> &quadrature_data =
        local_quadrature_points_data[q];
       
Below we declare the force vector and the parameters of the PML $s$ and $\xi$.

        Tensor<1, dim> force;
        std::complex<double> xi(1, 0);
@@ -1178,7 +1178,7 @@
\[
 (\nabla\phi)_{ij} = \partial_j \phi_i
\]

Note the position of the indices $i$ and $j$ and the notation that we use in this tutorial: $\partial_j\phi_i$. As the stiffness tensor is not symmetric, it is very easy to make a mistake.

        stiffness_coefficient +=
        grad_phi_i[m][n] *
        (alpha[m][n][k][l] * grad_phi_j[l][k] +
      @@ -1275,7 +1275,7 @@
        {
        TimerOutput::Scope t(computing_timer, "store_frequency_step_data");
       
We store the displacement in the $x$ direction; the displacement in the $y$ direction is negligible.

        const unsigned int probe_displacement_component = 0;
       

      The vector coordinates contains the coordinates in the HDF5 file of the points of the probe that are located in locally owned cells. The vector displacement_data contains the value of the displacement at these points.

      @@ -1608,8 +1608,8 @@
        for (const std::string &group_name : group_names)
        {

For each of these two group names, we now create the group and put attributes into these groups; a short sketch of how such attribute writing can look follows the list. Specifically, these are:

• The dimensions of the waveguide (in $x$ and $y$ directions)
• The position of the probe (in $x$ and $y$ directions)
      • The number of points in the probe
      • The global refinement level
      • The cavity resonance frequency
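As promised above, here is a hedged sketch of what writing such attributes with the deal.II HDF5 wrappers can look like (the group and attribute names and values are illustrative, not the tutorial's exact strings):

  HDF5::File file("results.h5",
                  HDF5::File::FileAccessMode::create,
                  MPI_COMM_WORLD);
  HDF5::Group group = file.create_group("displacement");
  // Attributes are simple key/value pairs attached to the group:
  group.set_attribute("dimension_x", 2e-5);
  group.set_attribute("dimension_y", 2e-8);
  group.set_attribute("probe_pos_x", 8e-6);
  group.set_attribute("probe_pos_y", 0.);
  group.set_attribute("nb_probe_points", 5);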
/usr/share/doc/packages/dealii/doxygen/deal.II/step_63.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_63.html 2024-11-15 06:44:31.823692959 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_63.html 2024-11-15 06:44:31.827692995 +0000 @@ -166,40 +166,40 @@

        This program solves an advection-diffusion problem using a geometric multigrid (GMG) preconditioner. The basics of this preconditioner are discussed in step-16; here we discuss the necessary changes needed for a non-symmetric PDE. Additionally, we introduce the idea of block smoothing (as compared to point smoothing in step-16), and examine the effects of DoF renumbering for additive and multiplicative smoothers.

        Equation

        The advection-diffusion equation is given by

\begin{align*}
 -\varepsilon \Delta u + \boldsymbol{\beta}\cdot \nabla u & = f &
 \text{ in } \Omega\\
 u &= g & \text{ on } \partial\Omega
\end{align*}

where $\varepsilon>0$, $\boldsymbol{\beta}$ is the advection direction, and $f$ is a source. A few notes:

1. If $\boldsymbol{\beta}=\boldsymbol{0}$, this is the Laplace equation solved in step-16 (and many other places).
2. If $\varepsilon=0$ then this is the stationary advection equation solved in step-9.
3. One can define a dimensionless number for this problem, called the Peclet number: $\mathcal{P} \dealcoloneq \frac{\|\boldsymbol{\beta}\| L}{\varepsilon}$, where $L$ is the length scale of the domain. It characterizes the kind of equation we are considering: If $\mathcal{P}>1$, we say the problem is advection-dominated, else if $\mathcal{P}<1$ we will say the problem is diffusion-dominated.
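As a concrete worked example, the test problem defined later in this introduction uses $\varepsilon=0.005$, an advection direction of unit length ($\|\boldsymbol{\beta}\|=1$), and a domain of extent $L=2$, so that

\begin{align*}
 \mathcal{P} = \frac{\|\boldsymbol{\beta}\|\, L}{\varepsilon} = \frac{1 \cdot 2}{0.005} = 400 \gg 1,
\end{align*}

i.e., that configuration is firmly advection-dominated.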

        For the discussion in this tutorial we will be concerned with advection-dominated flow. This is the complicated case: We know that for diffusion-dominated problems, the standard Galerkin method works just fine, and we also know that simple multigrid methods such as those defined in step-16 are very efficient. On the other hand, for advection-dominated problems, the standard Galerkin approach leads to oscillatory and unstable discretizations, and simple solvers are often not very efficient. This tutorial program is therefore intended to address both of these issues.

        Streamline diffusion

        Using the standard Galerkin finite element method, for suitable test functions $v_h$, a discrete weak form of the PDE would read

\begin{align*}
 a(u_h,v_h) = F(v_h)
\end{align*}

        where

\begin{align*}
 a(u_h,v_h) &= (\varepsilon \nabla v_h,\, \nabla u_h) +
 (v_h,\,\boldsymbol{\beta}\cdot \nabla u_h),\\
 F(v_h) &= (v_h,\,f).
\end{align*}

        Unfortunately, one typically gets oscillatory solutions with this approach. Indeed, the following error estimate can be shown for this formulation:

\begin{align*}
 \|\nabla (u-u_h)\| \leq (1+\mathcal{P}) \inf_{v_h} \|\nabla (u-v_h)\|.
\end{align*}

        The infimum on the right can be estimated as follows if the exact solution is sufficiently smooth:

@@ -207,52 +207,52 @@
\begin{align*}
   \inf_{v_h} \|\nabla (u-v_h)\|
   \le
   \|\nabla (u-I_h u)\|
   \le
   h^k
   C
   \|\nabla^k u\|
\end{align*}

        where $k$ is the polynomial degree of the finite elements used. As a consequence, we obtain the estimate

\begin{align*}
 \|\nabla (u-u_h)\|
 \leq (1+\mathcal{P}) C h^k
   \|\nabla^k u\|.
\end{align*}

In other words, the numerical solution will converge. On the other hand, given the definition of $\mathcal{P}$ above, we have to expect poor numerical solutions with a large error when $\varepsilon \ll \|\boldsymbol{\beta}\| L$, i.e., if the problem has only a small amount of diffusion.

        To combat this, we will consider the new weak form

\begin{align*}
 a(u_h,\,v_h) + \sum_K (-\varepsilon \Delta u_h +
 \boldsymbol{\beta}\cdot \nabla u_h-f,\,\delta_K
 \boldsymbol{\beta}\cdot \nabla v_h)_K = F(v_h)
\end{align*}

where the sum is done over all cells $K$ with the inner product taken for each cell, and $\delta_K$ is a cell-wise constant stabilization parameter defined in [john2006discontinuity].

Essentially, adding in the discrete strong form residual enhances the coercivity of the bilinear form $a(\cdot,\cdot)$ which increases the stability of the discrete solution. This method is commonly referred to as streamline diffusion or SUPG (streamline upwind/Petrov-Galerkin).
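To make this concrete, here is a hedged sketch of how the stabilization term can enter a cell-level assembly loop (the variable name advection_direction is illustrative; the tutorial's actual assembly routine, shown further down, differs in details and also treats the right-hand side):

  // Inside the loops over quadrature point q and test/trial indices i, j:
  // add delta_K (beta . grad v_h) tested against the strong residual part
  // (-eps Laplace u_h + beta . grad u_h).
  cell_matrix(i, j) +=
    delta *
    (advection_direction * fe_values.shape_grad(i, q)) *      // beta . grad v
    (-settings.epsilon * trace(fe_values.shape_hessian(j, q)) // -eps Lap u
     + advection_direction * fe_values.shape_grad(j, q)) *    // beta . grad u
    fe_values.JxW(q);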

        Smoothers

One of the goals of this tutorial is to expand from using a simple (point-wise) Gauss-Seidel (SOR) smoother that is used in step-16 (class PreconditionSOR) on each level of the multigrid hierarchy. The term "point-wise" is traditionally used in solvers to indicate that one solves at one "grid point" at a time; for scalar problems, this means to use a solver that updates one unknown of the linear system at a time, keeping all of the others fixed; one would then iterate over all unknowns in the problem and, once done, start over again from the first unknown until these "sweeps" converge. Jacobi, Gauss-Seidel, and SOR iterations can all be interpreted in this way. In the context of multigrid, one does not think of these methods as "solvers", but as "smoothers". As such, one is not interested in actually solving the linear system. It is enough to remove the high-frequency part of the residual for the multigrid method to work, because that allows restricting the solution to a coarser mesh. Therefore, one only does a small, fixed number of "sweeps" over all unknowns. In the code in this tutorial this is controlled by the "Smoothing steps" parameter.

But these methods are known to converge rather slowly when used as solvers. While they are surprisingly good as multigrid smoothers, they can also be improved upon. In particular, we consider "cell-based" smoothers here as well. These methods solve for all unknowns on a cell at once, keeping all other unknowns fixed; they then move on to the next cell, and so on and so forth. One can think of them as "block" versions of Jacobi, Gauss-Seidel, or SOR, but because degrees of freedom are shared among multiple cells, these blocks overlap and the methods are in fact best explained within the framework of additive and multiplicative Schwarz methods.

In contrast to step-16, our test problem contains an advective term. Especially with a small diffusion constant $\varepsilon$, information is transported along streamlines in the given advection direction. This means that smoothers are likely to be more effective if they allow information to travel in downstream direction within a single smoother application. If we want to solve one unknown (or block of unknowns) at a time in the order in which these unknowns (or blocks) are enumerated, then this information propagation property requires reordering degrees of freedom or cells (for the cell-based smoothers) accordingly so that the ones further upstream are treated earlier (have lower indices) and those further downstream are treated later (have larger indices). The influence of the ordering will be visible in the results section.

        Let us now briefly define the smoothers used in this tutorial. For a more detailed introduction, we refer to [KanschatNotesIterative] and the books [smith2004domain] and [toselli2006domain]. A Schwarz preconditioner requires a decomposition

\begin{align*}
 V = \sum_{j=1}^J V_j
\end{align*}

of our finite element space $V$. Each subproblem $V_j$ also has a Ritz projection $P_j: V \rightarrow V_j$ based on the bilinear form $a(\cdot,\cdot)$. This projection induces a local operator $A_j$ for each subproblem $V_j$. If $\Pi_j:V\rightarrow V_j$ is the orthogonal projector onto $V_j$, one can show $A_jP_j=\Pi_j^TA$.

        With this we can define an additive Schwarz preconditioner for the operator $A$ as

\begin{align*}
  B^{-1} = \sum_{j=1}^J P_j A^{-1} = \sum_{j=1}^J A_j^{-1} \Pi_j^T.
\end{align*}

In other words, we project our solution into each subproblem, apply the inverse of the subproblem $A_j$, and sum the contributions up over all $j$.

Note that one can interpret the point-wise (one unknown at a time) Jacobi method as an additive Schwarz method by defining a subproblem $V_j$ for each degree of freedom. Then, $A_j^{-1}$ becomes a multiplication with the inverse of a diagonal entry of $A$.

For the "Block Jacobi" method used in this tutorial, we define a subproblem $V_j$ for each cell of the mesh on the current level. Note that we use a continuous finite element, so these blocks are overlapping, as degrees of freedom on an interface between two cells belong to both subproblems. The logic for the Schwarz operator operating on the subproblems (in deal.II they are called "blocks") is implemented in the class RelaxationBlock. The "Block Jacobi" method is implemented in the class RelaxationBlockJacobi. Many aspects of the class (for example how the blocks are defined and how to invert the local subproblems $A_j$) can be configured in the smoother data, see RelaxationBlock::AdditionalData and DoFTools::make_cell_patches() for details.

        So far, we discussed additive smoothers where the updates can be applied independently and there is no information flowing within a single smoother application. A multiplicative Schwarz preconditioner addresses this and is defined by

\begin{align*}
  B^{-1} = \left( I- \prod_{j=1}^J \left(I-P_j\right) \right) A^{-1}.
\end{align*}

In contrast to above, the updates on the subproblems $V_j$ are applied sequentially. This means that the update obtained when inverting the subproblem $A_j$ is immediately used in $A_{j+1}$. This becomes visible when writing out the product:

@@ -267,18 +267,18 @@
\begin{align*}
  B^{-1}
  =
  \left(
    I - \prod_{j=1}^J \left(I-P_j\right)
  \right) A^{-1}
  =
  A^{-1}
  -
    \left[ \left(I-P_1\right)
    \left[ \left(I-P_2\right)\cdots
      \left[\left(I-P_J\right) A^{-1}\right] \cdots \right] \right]
\end{align*}

When defining the sub-spaces $V_j$ as whole blocks of degrees of freedom, this method is implemented in the class RelaxationBlockSOR and used when you select "Block SOR" in this tutorial. The class RelaxationBlockSOR is also derived from RelaxationBlock. As such, both additive and multiplicative Schwarz methods are implemented in a unified framework.

        Finally, let us note that the standard Gauss-Seidel (or SOR) method can be seen as a multiplicative Schwarz method with a subproblem for each DoF.

        Test problem

We will be considering the following test problem: $\Omega = [-1,\,1]\times[-1,\,1]\backslash B_{0.3}(0)$, i.e., a square with a circle of radius 0.3 centered at the origin removed. In addition, we use $\varepsilon=0.005$, $\boldsymbol{\beta} = [-\sin(\pi/6),\,\cos(\pi/6)]$, $f=0$, and Dirichlet boundary values

\begin{align*}
 g = \left\{\begin{array}{ll} 1 & \text{if } x=-1 \text{ or } y=-1,\,x\geq 0.5 \\
 0 & \text{otherwise} \end{array}\right.
\end{align*}

        The following figures depict the solutions with (left) and without (right) streamline diffusion. Without streamline diffusion we see large oscillations around the boundary layer, demonstrating the instability of the standard Galerkin finite element method for this problem.

        @@ -670,7 +670,7 @@
          Assert(component == 0, ExcIndexRange(component, 0, 1));
          (void)component;
         
Set boundary to 1 if $x=1$, or if $x>0.5$ and $y=-1$.

          if (std::fabs(p[0] - 1) < 1e-8 ||
          (std::fabs(p[1] + 1) < 1e-8 && p[0] >= 0.5))
          {
        @@ -959,7 +959,7 @@
          right_hand_side.value_list(scratch_data.fe_values.get_quadrature_points(),
          rhs_values);
         
If we are using streamline diffusion we must add its contribution to both the cell matrix and the cell right-hand side. If we are not using streamline diffusion, setting $\delta=0$ negates this contribution below and we are left with the standard, Galerkin finite element assembly.

          const double delta = (settings.with_streamline_diffusion ?
          compute_stabilization_delta(cell->diameter(),
          settings.epsilon,
        @@ -1081,7 +1081,7 @@
If $(i,j)$ is an interface_out dof pair, then $(j,i)$ is an interface_in dof pair. Note: For interface_in, we load the transpose of the interface entries, i.e., the entry for dof pair $(j,i)$ is stored in interface_in(i,j). This is an optimization for the symmetric case which allows only one matrix to be used when setting the edge_matrices in solve(). Here, however, since our problem is non-symmetric, we must store both interface_in and interface_out matrices.

          for (unsigned int i = 0; i < copy_data.dofs_per_cell; ++i)
          for (unsigned int j = 0; j < copy_data.dofs_per_cell; ++j)
          if (mg_constrained_dofs.is_interface_matrix_entry(
        @@ -1445,8 +1445,8 @@
          }

        Results

        GMRES Iteration Numbers

The major advantage for GMG is that it is an $\mathcal{O}(n)$ method, that is, the complexity of the problem increases linearly with the problem size. To show, then, that the linear solver presented in this tutorial is in fact $\mathcal{O}(n)$, all one needs to do is show that the iteration counts for the GMRES solve stay roughly constant as we refine the mesh.

Each of the following tables gives the GMRES iteration counts to reduce the initial residual by a factor of $10^8$. We selected a sufficient number of smoothing steps (based on the method) to get iteration numbers independent of mesh size. As can be seen from the tables below, the method is indeed $\mathcal{O}(n)$.

        DoF/Cell Renumbering

        The point-wise smoothers ("Jacobi" and "SOR") get applied in the order the DoFs are numbered on each level. We can influence this using the DoFRenumbering namespace. The block smoothers are applied based on the ordering we set in setup_smoother(). We can visualize this numbering. The following pictures show the cell numbering of the active cells in downstream, random, and upstream numbering (left to right):
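For the level degrees of freedom used by the multigrid hierarchy, such a renumbering can be requested per level; a hedged sketch (the exact overload should be checked against the DoFRenumbering documentation, and advection_direction is an illustrative name) looks like:

  // Renumber the DoFs of each multigrid level in the advection direction,
  // so that downstream unknowns receive larger indices:
  const Tensor<1, dim> direction = advection_direction;
  for (unsigned int level = 0; level < triangulation.n_global_levels(); ++level)
    DoFRenumbering::downstream(dof_handler, level, direction);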

        @@ -1502,7 +1502,7 @@
        131072 132096 12 16 19 11 12 21
        /usr/share/doc/packages/dealii/doxygen/deal.II/step_64.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_64.html 2024-11-15 06:44:31.875693424 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_64.html 2024-11-15 06:44:31.875693424 +0000 @@ -140,12 +140,12 @@

While we have tried to keep the interfaces of the matrix-free classes for the CPU and the GPU as close as possible, there are a few differences. When using the matrix-free framework on a GPU, one must write some CUDA code. However, the amount is fairly small and the use of CUDA is limited to a few keywords.

        The test case

        In this example, we consider the Helmholtz problem

\begin{eqnarray*}
 - \nabla \cdot
 \nabla u + a(\mathbf x) u &=& 1,\\ u &=& 0 \quad \text{on } \partial \Omega
\end{eqnarray*}

where $a(\mathbf x)$ is a variable coefficient.

We choose as domain $\Omega=[0,1]^3$ and $a(\mathbf x)=\frac{10}{0.05 + 2\|\mathbf x\|^2}$. Since the coefficient is symmetric around the origin but the domain is not, we will end up with a non-symmetric solution.

If you've made it this far into the tutorial, you will know what the weak formulation of this problem looks like and how, in principle, one assembles linear systems for it. Of course, in this program we will in fact not actually form the matrix, but rather only represent its action when one multiplies with it.

        Moving data to and from the device

GPUs (we will use the term "device" from now on to refer to the GPU) have their own memory that is separate from the memory accessible to the CPU (we will use the term "host" from now on). A normal calculation on the device can be divided into three separate steps:

          @@ -266,8 +266,8 @@
           
           
The following function implements this coefficient. Recall from the introduction that we have defined it as $a(\mathbf x)=\frac{10}{0.05 + 2\|\mathbf x\|^2}$.

            template <int dim, int fe_degree>
            VaryingCoefficientFunctor<dim, fe_degree>::operator()(
          @@ -321,9 +321,9 @@
           

          The Helmholtz problem we want to solve here reads in weak form as follows:

\begin{eqnarray*}
   (\nabla v, \nabla u)+ (v, a(\mathbf x) u) &=&(v,1) \quad \forall v.
\end{eqnarray*}

          If you have seen step-37, then it will be obvious that the two terms on the left-hand side correspond to the two function calls here:

            template <int dim, int fe_degree>
          @@ -666,7 +666,7 @@

          The output results function is as usual since we have already copied the values back from the GPU to the CPU.

While we're already doing something with the function, we might as well compute the $L_2$ norm of the solution. We do this by calling VectorTools::integrate_difference(). That function is meant to compute the error by evaluating the difference between the numerical solution (given by a vector of values for the degrees of freedom) and an object representing the exact solution. But we can easily compute the $L_2$ norm of the solution by passing in a zero function instead. That is, instead of evaluating the error $\|u_h-u\|_{L_2(\Omega)}$, we are just evaluating $\|u_h-0\|_{L_2(\Omega)}=\|u_h\|_{L_2(\Omega)}$.
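A hedged sketch of that call sequence (assuming the usual triangulation, dof_handler, and solution objects of such a program; the quadrature degree is an illustrative choice):

  // Cell-wise norms of the difference between solution and the zero
  // function, i.e., cell-wise norms of the solution itself:
  Vector<float> difference_per_cell(triangulation.n_active_cells());
  VectorTools::integrate_difference(dof_handler,
                                    solution,
                                    Functions::ZeroFunction<dim>(),
                                    difference_per_cell,
                                    QGauss<dim>(fe_degree + 2),
                                    VectorTools::L2_norm);
  // Combine the cell-wise values into the global L2 norm:
  const double solution_norm =
    VectorTools::compute_global_error(triangulation,
                                      difference_per_cell,
                                      VectorTools::L2_norm);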

            template <int dim, int fe_degree>
            void HelmholtzProblem<dim, fe_degree>::output_results(
            const unsigned int cycle) const
          @@ -804,7 +804,7 @@
          Number of degrees of freedom: 117649
          Solved in 227 iterations.
          solution norm: 0.0205261
One can make two observations here: First, the norm of the numerical solution converges, presumably to the norm of the exact (but unknown) solution. And second, the number of iterations roughly doubles with each refinement of the mesh. (This is in keeping with the expectation that the number of CG iterations grows with the square root of the condition number of the matrix; and that we know that the condition number of the matrix of a second-order differential operator grows like ${\cal O}(h^{-2})$.) This is of course rather inefficient, as an optimal solver would have a number of iterations that is independent of the size of the problem. But having such a solver would require using a better preconditioner than the identity matrix we have used here.

          Possibilities for extensions

Currently, this program uses no preconditioner at all. This is mainly because constructing an efficient matrix-free preconditioner is non-trivial. However, simple choices just requiring the diagonal of the corresponding matrix are good candidates and these can be computed in a matrix-free way as well. Alternatively, and maybe even better, one could extend the tutorial to use multigrid with Chebyshev smoothers similar to step-37.

          The plain program

/usr/share/doc/packages/dealii/doxygen/deal.II/step_65.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_65.html 2024-11-15 06:44:31.927693888 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_65.html 2024-11-15 06:44:31.927693888 +0000 @@ -205,14 +205,14 @@
\begin{align*}
 (x,y) = (1-\xi)(1-\eta) (x_0,y_0) + \xi(1-\eta) (x_1,y_1) +
 (1-\xi)\eta (x_2,y_2) + \xi\eta (x_3,y_3).
\end{align*}

For the case of the curved surface, we want to modify this formula. For the top cell of the coarse mesh of the disk, we can assume that the points $(x_0,y_0)$ and $(x_1,y_1)$ sit along the straight line at the lower end and the points $(x_2,y_2)$ and $(x_3,y_3)$ are connected by a quarter circle along the top. We would then map a point $(\xi, \eta)$ as

          \begin{align*}
 (x,y) = (1-\eta) \big[(1-\xi) (x_0,y_0) + \xi (x_1,y_1)\big] +
       \eta \mathbf{c}_3(\xi),
 \end{align*}

          where $\mathbf{c}_3(\xi)$ is a curve that describes the $(x,y)$ coordinates of the quarter circle in terms of an arclength parameter $\xi\in (0,1)$. This represents a linear interpolation between the straight lower edge and the curved upper edge of the cell, and is the basis for the picture shown above.

This formula is easily generalized to the case where all four edges are described by a curve rather than a straight line. We call the four functions, parameterized by a single coordinate $\xi$ or $\eta$ in the horizontal and vertical directions, $\mathbf{c}_0, \mathbf{c}_1, \mathbf{c}_2, \mathbf{c}_3$ for the left, right, lower, and upper edge of a quadrilateral, respectively. The interpolation then reads

\begin{align*}
 (x,y) =& (1-\xi)\mathbf{c}_0(\eta) + \xi \mathbf{c}_1(\eta)
 + (1-\eta)\mathbf{c}_2(\xi) + \eta \mathbf{c}_3(\xi)\\
 &- \big[(1-\xi)(1-\eta) (x_0,y_0) + \xi(1-\eta) (x_1,y_1) +
 (1-\xi)\eta (x_2,y_2) + \xi\eta (x_3,y_3)\big]
\end{align*}
@@ -225,11 +225,11 @@

where we have used that $\mathbf{c}_0(0) = (x_0,y_0)$ or $\mathbf{c}_0(1) = (x_2,y_2)$. The subtraction of the bilinear interpolation in the second line of the formula makes sure that the prescribed curves are followed exactly on the boundary: Along each of the four edges, we need to subtract the contribution of the two adjacent edges evaluated in the corners, which is then simply a vertex position. It is easy to check that the formula for the circle above is reproduced if three of the four curves $\mathbf{c}_i$ are straight and thus coincide with the bilinear interpolation.

          This formula, called transfinite interpolation, was introduced in 1973 by Gordon and Hall. Even though transfinite interpolation essentially only represents a linear blending of the bounding curves, the interpolation exactly follows the boundary curves for each real number $\xi\in (0,1)$ or $\eta\in (0,1)$, i.e., it interpolates in an infinite number of points, which was the original motivation to label this variant of interpolation a transfinite one by Gordon and Hall. Another interpretation is that the transfinite interpolation interpolates from the left and right and the top and bottom linearly, from which we need to subtract the bilinear interpolation to ensure a unit weight in the interior of the domain.

The transfinite interpolation is easily generalized to three spatial dimensions. In that case, the interpolation allows blending 6 different surface descriptions for any of the quads of a three-dimensional cell and 12 edge descriptions for the lines of a cell. Again, to ensure a consistent map, it is necessary to subtract the contribution of edges and add the contribution of vertices again to make the curves follow the prescribed surface or edge description. In the three-dimensional case, it is also possible to use a transfinite interpolation from a curved edge both into the adjacent faces and the adjacent cells.

The implementation of the transfinite interpolation in deal.II is general in the sense that it can deal with arbitrary curves. It will evaluate the curves in terms of their original coordinates of the $d$-dimensional space but with one (or two, in the case of edges in 3D) coordinate held fixed at $0$ or $1$ to ensure that any other manifold class, including CAD files if desired, can be applied out of the box. Transfinite interpolation is a standard ingredient in mesh generators, so the main strength of the integration of this feature within the deal.II library is to enable it during adaptive refinement and coarsening of the mesh, and for creating higher-degree mappings that use manifolds to insert additional points beyond the mesh vertices.

As a final remark on transfinite interpolation, we mention that the mesh refinement strategies in deal.II in the absence of a volume manifold description are also based on the weights of the transfinite interpolation and optimal in that sense. The difference is that the default algorithm sees only one cell at a time, and so will apply the optimal algorithm only on those cells touching the curved manifolds. In contrast, using the transfinite mapping on entire patches of cells (originating from one coarser cell) allows using the transfinite interpolation method in a way that propagates information from the boundary to cells far away.

          Transfinite interpolation is expensive and how to deal with it

          A mesh with a transfinite manifold description is typically set up in two steps. The first step is to create a coarse mesh (or read it in from a file) and to attach a curved manifold to some of the mesh entities. For the above example of the disk, we attach a polar manifold to the faces along the outer circle (this is done automatically by GridGenerator::hyper_ball()). Before we start refining the mesh, we then assign a TransfiniteInterpolationManifold to all interior cells and edges of the mesh, which of course needs to be based on some manifold id that we have assigned to those entities (everything except the circle on the boundary). It does not matter whether we also assign a TransfiniteInterpolationManifold to the inner square of the disk or not because the transfinite interpolation on a coarse cell with straight edges (or flat faces in 3d) simply yields subdivided children with straight edges (flat faces).
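A hedged sketch of this two-step setup for the disk (the manifold id choices are illustrative):

  Triangulation<2> triangulation;
  GridGenerator::hyper_ball(triangulation);
  // Step 1: hyper_ball() already attaches a curved manifold to the boundary.
  // Give all interior entities a separate manifold id, keeping the boundary
  // id for the circle:
  triangulation.set_all_manifold_ids(1);
  triangulation.set_all_manifold_ids_on_boundary(0);
  // Step 2: let a transfinite interpolation blend the curved boundary into
  // the interior cells before any refinement happens:
  TransfiniteInterpolationManifold<2> transfinite_manifold;
  transfinite_manifold.initialize(triangulation);
  triangulation.set_manifold(1, transfinite_manifold);
  triangulation.refine_global(4);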

Later, when the mesh is refined or when a higher-order mapping is set up based on this mesh, the cells will query the underlying manifold object for new points. This process takes a set of surrounding points, for example the four vertices of a two-dimensional cell, and a set of weights for each of these points, in order to define a new point. For the mid point of a cell, each of the four vertices would get weight 0.25. For the transfinite interpolation manifold, the process of building weighted sums requires some serious work. By construction, we want to combine the points in terms of the reference coordinates $\xi$ and $\eta$ (or $\xi, \eta, \zeta$ in 3D) of the surrounding points. However, the interface of the manifold classes in deal.II does not get the reference coordinates of the surrounding points (as they are not stored globally) but rather the physical coordinates only. Thus, the first step the transfinite interpolation manifold has to do is to invert the mapping and find the reference coordinates within one of the coarse cells of the transfinite interpolation (e.g. one of the four shaded coarse-grid cells of the disk mesh above). This inversion is done by a Newton iteration (or rather, a finite-difference based Newton scheme combined with Broyden's method) and queries the transfinite interpolation according to the formula above several times. Each of these queries in turn might call an expensive manifold, e.g. a spherical description of a ball, and be expensive on its own. Since the Manifold interface class of deal.II only provides a set of points, the transfinite interpolation initially does not even know to which coarse grid cell the set of surrounding points belongs and needs to search among several cells based on some heuristics. In terms of charts, one could describe the implementation of the transfinite interpolation as an atlas-based implementation: Each cell of the initial coarse grid of the triangulation represents a chart with its own reference space, and the surrounding manifolds provide a way to transform from the chart space (i.e., the reference cell) to the physical space. The collection of the charts of the coarse grid cells is an atlas, and as usual, the first thing one does when looking up something in an atlas is to find the right chart.

          Once the reference coordinates of the surrounding points have been found, a new point in the reference coordinate system is computed by a simple weighted sum. Finally, the reference point is inserted into the formula for the transfinite interpolation, which gives the desired new point.

          In a number of cases, the curved manifold is not only used during mesh refinement, but also to ensure a curved representation of boundaries within the cells of the computational domain. This is a necessity to guarantee high-order convergence for high-order polynomials on complex geometries anyway, but sometimes an accurate geometry is also desired with linear shape functions. This is often done by polynomial descriptions of the cells and called the isoparametric concept if the polynomial degree to represent the curved mesh elements is the same as the degree of the polynomials for the numerical solution. If the degree of the geometry is higher or lower than the solution, one calls that a super- or sub-parametric geometry representation, respectively. In deal.II, the standard class for polynomial representation is MappingQ. If, for example, this class is used with polynomial degree $4$ in 3D, a total of 125 (i.e., $(4+1)^3$) points are needed for the interpolation. Among these points, 8 are the cell's vertices and already available from the mesh, but the other 117 need to be provided by the manifold. In case the transfinite interpolation manifold is used, we can imagine that going through the pull-back into reference coordinates of some yet to be determined coarse cell, followed by subsequent push-forward on each of the 117 points, is a lot of work and can be very time consuming.

          What makes things worse is that the structure of many programs is such that the mapping is queried several times independently for the same cell. Its primary use is in the assembly of the linear system, i.e., the computation of the system matrix and the right hand side, via the mapping argument of the FEValues object. However, also the interpolation of boundary values, the computation of numerical errors, writing the output, and evaluation of error estimators must involve the same mapping to ensure a consistent interpretation of the solution vectors. Thus, even a linear stationary problem that is solved once will evaluate the points of the mapping several times. For the cubic case in 3D mentioned above, this means computing 117 points per cell by an expensive algorithm many times. The situation is more pressing for nonlinear or time-dependent problems where those operations are done over and over again.

@@ -485,7 +485,7 @@
$\sum_{k=1}^d\text{det}(J) w_q a(x)\frac{\partial \varphi_i(\boldsymbol \xi_q)}{\partial x_k} \frac{\partial \varphi_j(\boldsymbol \xi_q)}{\partial x_k}$, which is exactly the terms needed for the bilinear form of the Laplace equation.

The reason for choosing this somewhat unusual scheme is due to the heavy work involved in computing the cell matrix for a relatively high polynomial degree in 3d. As we want to highlight the cost of the mapping in this tutorial program, we better do the assembly in an optimized way in order to not chase bottlenecks that have been solved by the community already. Matrix-matrix multiplication is one of the best optimized kernels in the HPC context, and the FullMatrix::mTmult() function will call into those optimized BLAS functions. If the user has provided a good BLAS library when configuring deal.II (like OpenBLAS or Intel's MKL), the computation of the cell matrix will execute close to the processor's peak arithmetic performance. As a side note, we mention that despite an optimized matrix-matrix multiplication, the current strategy is sub-optimal in terms of complexity as the work to be done is proportional to $(p+1)^9$ operations for degree $p$ (this also applies to the usual evaluation with FEValues). One could compute the cell matrix with $\mathcal O((p+1)^7)$ operations by utilizing the tensor product structure of the shape functions, as is done by the matrix-free framework in deal.II. We refer to step-37 and the documentation of the tensor-product-aware evaluators FEEvaluation for details on how an even more efficient cell matrix computation could be realized.
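As a small illustration of the matrix-matrix idea (dimensions and names are illustrative, not the tutorial's exact code), the cell matrix can be formed as a product $P P^T$ with FullMatrix::mTmult():

  // partial_matrix holds, row-wise per DoF, the scaled partial derivatives
  // sqrt(det(J) w_q a(x)) * d phi_i / d x_k for all quadrature points and
  // directions; cell_matrix = partial_matrix * partial_matrix^T then sums
  // over quadrature points and directions in one optimized (BLAS) call.
  FullMatrix<double> partial_matrix(dofs_per_cell, dim * n_q_points);
  FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
  // ... fill partial_matrix from shape function gradients and JxW values ...
  partial_matrix.mTmult(cell_matrix, partial_matrix);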

            template <int dim>
            void PoissonProblem<dim>::assemble_system(const Mapping<dim> &mapping)
            {
          @@ -615,7 +615,7 @@
The next operation in the postprocessing function is to compute the $L_2$ and $H^1$ errors against the analytical solution. As the analytical solution is a quadratic polynomial, we expect a very accurate result at this point. If we were solving on a simple mesh with planar faces and a coefficient whose jumps are aligned with the faces between cells, then we would expect the numerical result to coincide with the analytical solution up to roundoff accuracy. However, since we are using deformed cells following a sphere, which are only tracked by polynomials of degree 4 (one more than the degree for the finite elements), we will see that there is an error around $10^{-7}$. We could get more accuracy by increasing the polynomial degree or refining the mesh.

            {
            TimerOutput::Scope scope(timer, "Compute error norms");
           
          /usr/share/doc/packages/dealii/doxygen/deal.II/step_66.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_66.html 2024-11-15 06:44:31.999694531 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_66.html 2024-11-15 06:44:31.999694531 +0000 @@ -172,7 +172,7 @@

          This problem is also called the Gelfand problem and is a typical example for problems from combustion theory, see for example [bebernes1989mathematical].

          Discretization with finite elements


          As usual, we first derive the weak formulation for this problem by multiplying with a smooth test function $v\colon\Omega\to\mathbb{R}$ respecting the boundary condition and integrating over the domain $\Omega$. Integration by parts and putting the term from the right hand side to the left yields the weak formulation: Find a function $u\colon\Omega\to\mathbb{R}$ such that for all test functions $v$ it holds:

\begin{align*}
 \int_\Omega \nabla v \cdot \nabla u \,\mathrm{d}x
 -
 \int_\Omega v \exp(u) \,\mathrm{d}x
 =
 0.
\end{align*}

@@ -199,8 +199,8 @@

\begin{align*}
 &\text{Solve: }  & F'(u_h^n)[s_h^n] &= -F(u_h^n),\\
 &\text{Update: } & u_h^{n+1} &= u_h^n + s_h^n.
\end{align*}


So in each Newton step we have to solve a linear problem $A\,x = b$, where the system matrix $A$ is represented by the Jacobian $F'(u_h^n)[\,\cdot\,]\colon\mathbb{R}^N\to\mathbb{R}^N$ and the right hand side $b$ by the negative residual $-F(u_h^n)$. The solution vector $x$ is in that case the Newton update of the $n$-th Newton step. Note that we assume an initial guess $u_h^0$ which already fulfills the Dirichlet boundary conditions of the problem formulation (in fact, this could also be an inhomogeneous Dirichlet boundary condition), so that the Newton updates $s_h$ satisfy a homogeneous Dirichlet condition.


Until now we have only tested with the basis functions; however, we can also represent any function of $V_h$ as a linear combination of basis functions. More mathematically, this means that every element of $V_h$ can be identified with a vector $U\in\mathbb{R}^N$ via the representation formula: $u_h = \sum_{i=1}^N U_i \varphi_i$. Using this, we can give an expression for the discrete Jacobian and the residual:

\begin{align*}
 A_{ij} = \bigl( F'(u_h^n) \bigr)_{ij}
 &=
 \int_\Omega \nabla\varphi_i \cdot \nabla\varphi_j \,\mathrm{d}x
 -
 \int_\Omega \varphi_i \, \exp(u_h^n) \varphi_j \,\mathrm{d}x,\\
 b_i = \bigl( F(u_h^n) \bigr)_i
 &=
 \int_\Omega \nabla\varphi_i \cdot \nabla u_h^n \,\mathrm{d}x
 -
 \int_\Omega \varphi_i \, \exp(u_h^n) \,\mathrm{d}x.
\end{align*}
@@ -921,7 +921,7 @@

           

          GelfandProblem::compute_residual


According to step-15, the following function computes the norm of the nonlinear residual for the solution $u_h^n + \alpha s_h^n$ with the help of the evaluate_residual() function. The Newton step length $\alpha$ becomes important if we were to use an adaptive version of the Newton method. Then, for example, we would compute the residual for different step lengths and compare the residuals. However, for our problem the full Newton step with $\alpha=1$ is the best we can do. An adaptive version of Newton's method becomes interesting if we have no good initial value. Note that in theory Newton's method converges with quadratic order, but only if we have an appropriate initial value. For unsuitable initial values the Newton method diverges even with quadratic order. A common way is then to use a damped version $\alpha<1$ until the Newton step is good enough and the full Newton step can be performed. This was also discussed in step-15.

            template <int dim, int fe_degree>
            double GelfandProblem<dim, fe_degree>::compute_residual(const double alpha)
            {
          @@ -1063,9 +1063,9 @@
            TimerOutput::Scope t(computing_timer, "solve");
           
           
We define a maximal number of Newton steps and tolerances for the convergence criterion. Usually, with good starting values, the Newton method converges in three to six steps, so a maximum of ten steps should be entirely sufficient. As tolerances we use $\|F(u^n_h)\|<\text{TOL}_f = 10^{-12}$ for the norm of the residual and $\|s_h^n\| < \text{TOL}_x = 10^{-10}$ for the norm of the Newton update. This seems a bit over the top, but we will see that, for our example, we will achieve these tolerances after a few steps.

            const unsigned int itmax = 10;
            const double TOLf = 1e-12;
            const double TOLx = 1e-10;
          @@ -1085,7 +1085,7 @@
            compute_update();
           
           

          Then we compute the errors, namely the norm of the Newton update and the residual. Note that at this point one could incorporate a step size control for the Newton method by varying the input parameter $\alpha$ for the compute_residual function. However, here we just use $\alpha$ equal to one for a plain Newton iteration.

            const double ERRx = newton_update.l2_norm();
            const double ERRf = compute_residual(1.0);
           
          @@ -1473,17 +1473,17 @@

          We show the solution for the two- and three-dimensional problem in the following figure.

          Solution of the two-dimensional Gelfand problem.
          Solution of the three-dimensional Gelfand problem.

          Newton solver


In the program output above we find some interesting information about the Newton iterations. The terminal output in each refinement cycle presents detailed diagnostics of the Newton method, which show first of all the number of Newton steps and, for each step, the norm of the residual $\|F(u_h^{n+1})\|$, the norm of the Newton update $\|s_h^n\|$, and the number of CG iterations, labeled it.


We observe that for all cases the Newton method converges in approximately three to four steps, which shows the quadratic convergence of the Newton method with a full step length $\alpha = 1$. However, be aware that for a badly chosen initial guess $u_h^0$, the Newton method will also diverge quadratically. Usually, if you do not have an appropriate initial guess, you try a few damped Newton steps with a reduced step length $\alpha < 1$ until the Newton step is again in the quadratic convergence domain. This damping and relaxation of the Newton step length truly requires a more sophisticated implementation of the Newton method, which we leave to you as a possible extension of the tutorial.
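
A damped step could look like the following minimal sketch (it assumes the compute_residual() semantics described above and the member names solution and newton_update; the backtracking constants are arbitrary choices for illustration, not the tutorial's method):

  // Halve the step length until the residual norm decreases, then accept
  // the damped update. compute_residual(alpha) returns ||F(u + alpha*s)||.
  double       alpha        = 1.0;
  const double old_residual = compute_residual(0.0);
  double       new_residual = compute_residual(alpha);
  while (new_residual >= old_residual && alpha > 0.0625) // assumed bound
    {
      alpha *= 0.5;
      new_residual = compute_residual(alpha);
    }
  solution.add(alpha, newton_update); // u^{n+1} = u^n + alpha * s^n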

          Furthermore, we see that the number of CG iterations is approximately constant with successive mesh refinements and an increasing number of DoFs. This is of course due to the geometric multigrid preconditioner and similar to the observations made in other tutorials that use this method, e.g., step-16 and step-37. Just to give an example, in the three-dimensional case after five refinements, we have approximately 14.7 million distributed DoFs with fourth-order Lagrangian finite elements, but the number of CG iterations is still less than ten.

In addition, there is one more very useful optimization that we applied and that should be mentioned here. In the compute_update() function we explicitly reset the vector holding the Newton update before passing it as the output vector to the solver. In that case we use a starting value of zero for the CG method, which is more suitable than the previous Newton update (the actual content of newton_update before resetting) and thus reduces the number of CG iterations by a few steps.

          Possibilities for extensions

          A couple of possible extensions are available concerning minor updates to the present code as well as a deeper numerical investigation of the Gelfand problem.

          More sophisticated Newton iteration

Beside a step size controlled version of the Newton iteration as mentioned already in step-15 (and actually implemented, with many more bells and whistles, in step-77), one could also implement a more flexible stopping criterion for the Newton iteration. For example, one could replace the fixed tolerances for the residual TOLf and for the Newton update TOLx and implement a mixed error control with a given absolute and relative tolerance, such that the Newton iteration exits successfully when, e.g.,

\begin{align*}
   \|F(u_h^{n+1})\| \leq \texttt{RelTol} \|u_h^{n+1}\| + \texttt{AbsTol}.
\end{align*}
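
In code, and reusing the quantities computed in the solve loop above, this criterion could be checked as in the following sketch (the tolerance values and the use of the $l_2$-norm of the solution vector are assumptions for illustration):

  // Mixed absolute/relative stopping test for the Newton iteration;
  // ERRf = ||F(u_h^{n+1})|| as computed by compute_residual(1.0).
  const double RelTol    = 1e-8;  // assumed relative tolerance
  const double AbsTol    = 1e-12; // assumed absolute tolerance
  const bool   converged = (ERRf <= RelTol * solution.l2_norm() + AbsTol);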

For more advanced applications with many nonlinear systems to solve, for example at each time step for a time-dependent problem, it turns out that it is not necessary to set up and assemble the Jacobian anew at every single Newton step or even for each time step. Instead, the existing Jacobian from a previous step can be used for the Newton iteration. The Jacobian is then only rebuilt if, for example, the Newton iteration converges too slowly. Such an idea yields a quasi-Newton method. Admittedly, when using the matrix-free framework, the assembly of the Jacobian is omitted anyway, but in this way one can try to optimize the reassembly of the geometric multigrid preconditioner. Remember that each time the solution from the old Newton step must be distributed to all levels and the multigrid preconditioner must be reinitialized.
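
A sketch of such a rebuild heuristic (all names and thresholds here are hypothetical, purely for illustration of the idea):

  // Rebuild the multigrid preconditioner only if the Newton iteration
  // converges too slowly; otherwise keep using the existing one.
  const bool rebuild = (newton_step > 5) || (ERRf > 0.5 * old_ERRf);
  if (rebuild)
    setup_multigrid_preconditioner(); // hypothetical: redistributes the
                                      // solution to all levels and
                                      // reinitializes the level smoothers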

          Parallel scalability and thread parallelism

          @@ -1491,9 +1491,9 @@

          Comparison to matrix-based methods

          Analogously to step-50 and the mentioned possible extension of step-75, you can convince yourself which method is faster.

          Eigenvalue problem

One can consider the corresponding eigenvalue problem, which is called the Bratu problem. For example, if we define a fixed eigenvalue $\lambda\in[0,6]$, we can compute the corresponding discrete eigenfunction. You will notice that the number of Newton steps will increase with increasing $\lambda$. To reduce the number of Newton steps you can use the following trick: start from a certain $\lambda$, compute the eigenfunction, increase $\lambda=\lambda+\delta_\lambda$, and then use the previous solution as an initial guess for the Newton iteration – this approach is called a "continuation method". In the end you can plot the $H^1(\Omega)$-norm over the eigenvalue $\lambda \mapsto \|u_h\|_{H^1(\Omega)}$. What do you observe for further increasing $\lambda>7$?
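
The continuation idea could be sketched as follows (solve_for_lambda() and the increment are hypothetical names and values, for illustration only):

  // March lambda upward, reusing the previous eigenfunction as the
  // initial guess for the next Newton solve.
  double       lambda       = 0.5; // assumed starting value
  const double delta_lambda = 0.5; // assumed increment
  while (lambda <= 6.0)
    {
      solve_for_lambda(lambda, solution); // hypothetical: Newton solve
                                          // starting from 'solution'
      lambda += delta_lambda;
    }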

          The plain program

          /* ------------------------------------------------------------------------
          *
          /usr/share/doc/packages/dealii/doxygen/deal.II/step_67.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_67.html 2024-11-15 06:44:32.119695603 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_67.html 2024-11-15 06:44:32.119695603 +0000 @@ -157,15 +157,15 @@

          This tutorial program solves the Euler equations of fluid dynamics using an explicit time integrator with the matrix-free framework applied to a high-order discontinuous Galerkin discretization in space. For details about the Euler system and an alternative implicit approach, we also refer to the step-33 tutorial program. You might also want to look at step-69 for an alternative approach to solving these equations.

          The Euler equations

          The Euler equations are a conservation law, describing the motion of a compressible inviscid gas,

\[
 \frac{\partial \mathbf{w}}{\partial t} + \nabla \cdot \mathbf{F}(\mathbf{w}) =
 \mathbf{G}(\mathbf w),
\]

where the $d+2$ components of the solution vector are $\mathbf{w}=(\rho, \rho u_1,\ldots,\rho u_d,E)^{\mathrm T}$. Here, $\rho$ denotes the fluid density, ${\mathbf u}=(u_1,\ldots, u_d)^\mathrm T$ the fluid velocity, and $E$ the energy density of the gas. The velocity is not directly solved for, but rather the variable $\rho \mathbf{u}$, the linear momentum (since this is the conserved quantity).

The Euler flux function, a $(d+2)\times d$ matrix, is defined as

\[
   \mathbf F(\mathbf w)
   =
   \begin{pmatrix}
   \rho \mathbf{u}\\
   \rho \mathbf{u} \otimes \mathbf{u} + \mathbb{I}p\\
   (E+p)\mathbf{u}
   \end{pmatrix}
\]

with $\mathbb{I}$ the $d\times d$ identity matrix and $\otimes$ the outer product; its components denote the mass, momentum, and energy fluxes, respectively. The right hand side forcing is given by

\[
   \mathbf G(\mathbf w)
   =
   \begin{pmatrix}
   0\\
   \rho\mathbf{g}\\
   \rho \mathbf{u} \cdot \mathbf{g}
   \end{pmatrix},
\]

where the vector $\mathbf g$ denotes the direction and magnitude of gravity. It could, however, also denote any other external force per unit mass that is acting on the fluid. (Think, for example, of the electrostatic forces exerted by an external electric field on charged particles.)

The three blocks of equations, the second involving $d$ components, describe the conservation of mass, momentum, and energy. The pressure is not a solution variable but needs to be expressed through a "closure relationship" by the other variables; we here choose the relationship appropriate for a gas with molecules composed of two atoms, which at moderate temperatures is given by $p=(\gamma - 1) \left(E-\frac 12 \rho \mathbf{u}\cdot \mathbf{u}\right)$ with the constant $\gamma = 1.4$.
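
To connect these formulas to code, here is a minimal sketch of evaluating $\mathbf F(\mathbf w)$ for one state (the function name and the pre-computed pressure argument are assumptions for illustration; the tutorial's actual implementation additionally works on vectorized data types):

  // Euler flux F(w) as a (dim+2) x dim table of fluxes for the state
  // w = (rho, rho*u, E); p must satisfy the closure relation above.
  #include <deal.II/base/tensor.h>

  template <int dim>
  dealii::Tensor<1, dim + 2, dealii::Tensor<1, dim>>
  euler_flux_sketch(const double                  rho,
                    const dealii::Tensor<1, dim> &momentum, // rho * u
                    const double                  E,
                    const double                  p)
  {
    const dealii::Tensor<1, dim> velocity = momentum / rho;
    dealii::Tensor<1, dim + 2, dealii::Tensor<1, dim>> flux;
    flux[0] = momentum; // mass flux: rho * u
    for (unsigned int d = 0; d < dim; ++d)
      {
        flux[1 + d] = momentum[d] * velocity; // rho u_d u
        flux[1 + d][d] += p;                  // pressure on the diagonal
      }
    flux[dim + 1] = (E + p) * velocity;       // energy flux
    return flux;
  }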

          High-order discontinuous Galerkin discretization

          For spatial discretization, we use a high-order discontinuous Galerkin (DG) discretization, using a solution expansion of the form

\[
 \mathbf{w}_h(\mathbf{x}, t) =
 \sum_{j=1}^{n_\mathbf{dofs}} \boldsymbol{\varphi}_j(\mathbf{x}) {w}_j(t).
\]


          Here, $\boldsymbol{\varphi}_j$ denotes the $j$th basis function, written in vector form with separate shape functions for the different components and letting $w_j(t)$ go through the density, momentum, and energy variables, respectively. In this form, the space dependence is contained in the shape functions and the time dependence in the unknown coefficients $w_j$. As opposed to the continuous finite element method where some shape functions span across element boundaries, the shape functions are local to a single element in DG methods, with a discontinuity from one element to the next. The connection of the solution from one cell to its neighbors is instead imposed by the numerical fluxes specified below. This allows for some additional flexibility, for example to introduce directionality in the numerical method by, e.g., upwinding.

DG methods are popular methods for solving problems of transport character because they combine low dispersion errors with controllable dissipation on barely resolved scales. This makes them particularly attractive for simulation in the field of fluid dynamics where a wide range of active scales needs to be represented and inadequately resolved features are prone to disturb the important well-resolved features. Furthermore, high-order DG methods are well-suited for modern hardware with the right implementation. At the same time, DG methods are no silver bullet. In particular when the solution develops discontinuities (shocks), as is typical for the Euler equations in some flow regimes, high-order DG methods tend toward oscillatory solutions, like all high-order methods when not using flux- or slope-limiters. This is a consequence of Godunov's theorem, which states that any total variation diminishing (TVD) scheme that is linear (like a basic DG discretization) can at most be first-order accurate. Put differently, since DG methods aim for higher order accuracy, they cannot be TVD on solutions that develop shocks. Even though some communities claim that the numerical flux in DG methods can control dissipation, this is of limited value unless all shocks in a problem align with cell boundaries. Any shock that passes through the interior of cells will again produce oscillatory components due to the high-order polynomials. In the finite element and DG communities, there exist a number of different approaches to deal with shocks, for example the introduction of artificial diffusion on troubled cells (using a troubled-cell indicator based e.g. on a modal decomposition of the solution), a switch to dissipative low-order finite volume methods on a subgrid, or the addition of some limiting procedures. Given the ample possibilities in this context, combined with the considerable implementation effort, we refrain here from treating the regime of the Euler equations with pronounced shocks, and rather concentrate on the regime of subsonic flows with wave-like phenomena. For a method that works well with shocks (but is more expensive per unknown), we refer to the step-69 tutorial program.

          For the derivation of the DG formulation, we multiply the Euler equations with test functions $\mathbf{v}$ and integrate over an individual cell $K$, which gives

\[
 \left(\mathbf{v}, \frac{\partial \mathbf{w}}{\partial t}\right)_{K}
 + \left(\mathbf{v}, \nabla \cdot \mathbf{F}(\mathbf{w})\right)_{K} =
 \left(\mathbf{v},\mathbf{G}(\mathbf w)\right)_{K}.
\]

          We then integrate the second term by parts, moving the divergence from the solution slot to the test function slot, and producing an integral over the element boundary:

\[
 \left(\mathbf{v}, \frac{\partial \mathbf{w}}{\partial t}\right)_{K}
 - \left(\nabla \mathbf{v}, \mathbf{F}(\mathbf{w})\right)_{K}
 + \left<\mathbf{v}, \mathbf{n} \cdot \widehat{\mathbf{F}}(\mathbf{w})
 \right>_{\partial K} =
 \left(\mathbf{v},\mathbf{G}(\mathbf w)\right)_{K}.
\]

In the surface integral, we have replaced the term $\mathbf{F}(\mathbf w)$ by the term $\widehat{\mathbf{F}}(\mathbf w)$, the numerical flux. The role of the numerical flux is to connect the solution on neighboring elements and weakly impose continuity of the solution. This ensures that the global coupling of the PDE is reflected in the discretization, despite independent basis functions on the cells. The connectivity to the neighbor is included by defining the numerical flux as a function $\widehat{\mathbf{F}}(\mathbf w^-, \mathbf w^+)$ of the solution from both sides of an interior face, $\mathbf w^-$ and $\mathbf w^+$. A basic property we require is that the numerical flux needs to be conservative. That is, we want all information (i.e., mass, momentum, and energy) that leaves a cell over a face to enter the neighboring cell in its entirety and vice versa. This can be expressed as $\widehat{\mathbf{F}}(\mathbf w^-, \mathbf w^+) = \widehat{\mathbf{F}}(\mathbf w^+, \mathbf w^-)$, meaning that the numerical flux evaluates to the same result from either side. Combined with the fact that the numerical flux is multiplied by the unit outer normal vector on the face under consideration, which points in opposite direction from the two sides, we see that the conservation is fulfilled. An alternative point of view of the numerical flux is as a single-valued intermediate state that links the solution weakly from both sides.

There is a large number of numerical flux functions available, also called Riemann solvers. For the Euler equations, there exist so-called exact Riemann solvers – meaning that the states from both sides are combined in a way that is consistent with the Euler equations along a discontinuity – and approximate Riemann solvers, which violate some physical properties and rely on other mechanisms to render the scheme accurate overall. Approximate Riemann solvers have the advantage of being cheaper to compute. Most flux functions have their origin in the finite volume community, which are similar to DG methods with polynomial degree 0 within the cells (called volumes). As the volume integral of the Euler operator $\mathbf{F}$ would disappear for constant solution and test functions, the numerical flux must fully represent the physical operator, explaining why there has been a large body of research in that community. For DG methods, consistency is guaranteed by higher order polynomials within the cells, making the numerical flux less of an issue and usually affecting only the convergence rate, e.g., whether the solution converges as $\mathcal O(h^p)$, $\mathcal O(h^{p+1/2})$ or $\mathcal O(h^{p+1})$ in the $L_2$ norm for polynomials of degree $p$. The numerical flux can thus be seen as a mechanism to select more advantageous dissipation/dispersion properties or regarding the extremal eigenvalue of the discretized and linearized operator, which affect the maximal admissible time step size in explicit time integrators.

          In this tutorial program, we implement two variants of fluxes that can be controlled via a switch in the program (of course, it would be easy to make them a run time parameter controlled via an input file). The first flux is the local Lax–Friedrichs flux

\[
 \hat{\mathbf{F}}(\mathbf{w}^-,\mathbf{w}^+) =
 \frac{\mathbf{F}(\mathbf{w}^-)+\mathbf{F}(\mathbf{w}^+)}{2} +
    \frac{\lambda}{2}\left[\mathbf{w}^--\mathbf{w}^+\right]\otimes
    \mathbf{n^-}.
\]

In the original definition of the Lax–Friedrichs flux, a factor $\lambda = \max\left(\|\mathbf{u}^-\|+c^-, \|\mathbf{u}^+\|+c^+\right)$ is used (corresponding to the maximal speed at which information is moving on the two sides of the interface), stating that the difference between the two states, $[\![\mathbf{w}]\!]$, is penalized by the largest eigenvalue in the Euler flux, which is $\|\mathbf{u}\|+c$, where $c=\sqrt{\gamma p / \rho}$ is the speed of sound. In the implementation below, we modify the penalty term somewhat, given that the penalty is of approximate nature anyway. We use

\begin{align*}
 \lambda
 &=
 \frac{1}{2}\max\left(\sqrt{\|\mathbf{u^-}\|^2+(c^-)^2},
                      \sqrt{\|\mathbf{u}^+\|^2+(c^+)^2}\right)
 \\
 &=
 \frac{1}{2}\sqrt{\max\left(\|\mathbf{u^-}\|^2+(c^-)^2,
                            \|\mathbf{u}^+\|^2+(c^+)^2\right)}.
\end{align*}
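
The modified penalty parameter can be computed with a single square root, as in this sketch (function and parameter names are assumptions for illustration):

  // lambda = 0.5 * sqrt(max(|u-|^2 + (c-)^2, |u+|^2 + (c+)^2))
  #include <deal.II/base/tensor.h>
  #include <algorithm>
  #include <cmath>

  template <int dim>
  double lambda_sketch(const dealii::Tensor<1, dim> &u_minus,
                       const dealii::Tensor<1, dim> &u_plus,
                       const double                  c_minus,
                       const double                  c_plus)
  {
    return 0.5 * std::sqrt(std::max(u_minus.norm_square() + c_minus * c_minus,
                                    u_plus.norm_square() + c_plus * c_plus));
  }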

The additional factor $\frac 12$ reduces the penalty strength (which results in a reduced negative real part of the eigenvalues, and thus increases the admissible time step size). Using the squares within the sums allows us to reduce the number of expensive square root operations, which is 4 for the original Lax–Friedrichs definition, to a single one. This simplification leads to at most a factor of 2 in the reduction of the parameter $\lambda$, since $\|\mathbf{u}\|^2+c^2 \leq \|\mathbf{u}\|^2+2 c |\mathbf{u}\| + c^2 = \left(\|\mathbf{u}\|+c\right)^2 \leq 2 \left(\|\mathbf{u}\|^2+c^2\right)$, with the last inequality following from Young's inequality.

The second numerical flux is one proposed by Harten, Lax and van Leer, called the HLL flux. It takes the different directions of propagation of the Euler equations into account, depending on the speed of sound. It utilizes some intermediate states $\bar{\mathbf{u}}$ and $\bar{c}$ to define the two branches $s^\mathrm{p} = \max\left(0, \bar{\mathbf{u}}\cdot \mathbf{n} + \bar{c}\right)$ and $s^\mathrm{n} = \min\left(0, \bar{\mathbf{u}}\cdot \mathbf{n} - \bar{c}\right)$. From these branches, one then defines the flux

\[
 \hat{\mathbf{F}}(\mathbf{w}^-,\mathbf{w}^+) =
 \frac{s^\mathrm{p} \mathbf{F}(\mathbf{w}^-)-s^\mathrm{n} \mathbf{F}(\mathbf{w}^+)}
                    {s^\mathrm p - s^\mathrm{n} } +
 \frac{s^\mathrm{p} s^\mathrm{n}}{s^\mathrm{p}-s^\mathrm{n}}
 \left[\mathbf{w}^--\mathbf{w}^+\right]\otimes \mathbf{n^-}.
\]
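
The two wave-speed branches with the arithmetic-mean intermediate states used below can be sketched as follows (names are illustrative assumptions; un_minus and un_plus denote the normal velocities $\mathbf{u}\cdot\mathbf{n}$ on the two sides):

  // s^p = max(0, u_bar.n + c_bar), s^n = min(0, u_bar.n - c_bar)
  #include <algorithm>

  inline void hll_branches(const double un_minus, const double un_plus,
                           const double c_minus,  const double c_plus,
                           double &s_p, double &s_n)
  {
    const double u_bar_n = 0.5 * (un_minus + un_plus); // arithmetic mean
    const double c_bar   = 0.5 * (c_minus + c_plus);   // arithmetic mean
    s_p = std::max(0.0, u_bar_n + c_bar);
    s_n = std::min(0.0, u_bar_n - c_bar);
  }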

Regarding the definition of the intermediate state $\bar{\mathbf{u}}$ and $\bar{c}$, several variants have been proposed. The variant originally proposed uses a density-averaged definition of the velocity, $\bar{\mathbf{u}} = \frac{\sqrt{\rho^-} \mathbf{u}^- + \sqrt{\rho^+}\mathbf{u}^+}{\sqrt{\rho^-} + \sqrt{\rho^+}}$. Since we consider the Euler equations without shocks, we simply use arithmetic means, $\bar{\mathbf{u}} = \frac{\mathbf{u}^- + \mathbf{u}^+}{2}$ and $\bar{c} = \frac{c^- + c^+}{2}$, with $c^{\pm} = \sqrt{\gamma p^{\pm} / \rho^{\pm}}$, in this tutorial program, and leave other variants to a possible extension. We also note that the HLL flux has been extended in the literature to the so-called HLLC flux, where C stands for the ability to represent contact discontinuities.

At the boundaries with no neighboring state $\mathbf{w}^+$ available, it is common practice to deduce suitable exterior values from the boundary conditions (see the general literature on DG methods for details). In this tutorial program, we consider three types of boundary conditions, namely inflow boundary conditions where all components are prescribed,

\[
 \mathbf{w}^+ = \begin{pmatrix} \rho_\mathrm{D}(t)\\
 (\rho \mathbf u)_{\mathrm D}(t) \\ E_\mathrm{D}(t)\end{pmatrix} \quad
  \text{(Dirichlet)},
\]

          subsonic outflow boundaries, where we do not prescribe exterior solutions as the flow field is leaving the domain and use the interior values instead; we still need to prescribe the energy as there is one incoming characteristic left in the Euler flux,

\[
 \mathbf{w}^+ = \begin{pmatrix} \rho^-\\
 (\rho \mathbf u)^- \\ E_\mathrm{D}(t)\end{pmatrix} \quad
  \text{(mixed Neumann/Dirichlet)},
\]

and wall boundary conditions, which describe a no-penetration configuration:

\[
 \mathbf{w}^+ = \begin{pmatrix} \rho^-\\
 (\rho \mathbf u)^- - 2 [(\rho \mathbf u)^-\cdot \mathbf n] \mathbf{n}
  \\ E^-\end{pmatrix}.
\]
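
The momentum reflection in this wall state can be written compactly, as in the following sketch (the function name is an assumption for illustration):

  // (rho u)^+ = (rho u)^- - 2 [(rho u)^- . n] n
  #include <deal.II/base/tensor.h>

  template <int dim>
  dealii::Tensor<1, dim>
  mirror_momentum(const dealii::Tensor<1, dim> &rho_u_minus,
                  const dealii::Tensor<1, dim> &normal)
  {
    return rho_u_minus - 2.0 * (rho_u_minus * normal) * normal;
  }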


The polynomial expansion of the solution is finally inserted into the weak form and test functions are replaced by the basis functions. This gives a discrete in space, continuous in time nonlinear system with a finite number of unknown coefficient values $w_j$, $j=1,\ldots,n_\text{dofs}$. Regarding the choice of the polynomial degree in the DG method, there is no consensus in the literature as of 2019 as to what polynomial degrees are most efficient, and the decision is problem-dependent. Higher order polynomials ensure better convergence rates and are thus superior for moderate to high accuracy requirements for smooth solutions. At the same time, the volume-to-surface ratio of where degrees of freedom are located increases with higher degrees, and this makes the effect of the numerical flux weaker, typically reducing dissipation. However, in most of the cases the solution is not smooth, at least not compared to the resolution that can be afforded. This is true for example in incompressible fluid dynamics, compressible fluid dynamics, and the related topic of wave propagation. In this pre-asymptotic regime, the error is approximately proportional to the numerical resolution, and other factors such as dispersion errors or the dissipative behavior become more important. Very high order methods are often ruled out because they come with more restrictive CFL conditions measured against the number of unknowns, and they are also not as flexible when it comes to representing complex geometries. Therefore, polynomial degrees between two and six are most popular in practice, see e.g. the efficiency evaluation in [FehnWallKronbichler2019] and references cited therein.

          Explicit time integration

          To discretize in time, we slightly rearrange the weak form and sum over all cells:

\[
 \sum_{K \in \mathcal T_h} \left(\boldsymbol{\varphi}_i,
 \frac{\partial \mathbf{w}}{\partial t}\right)_{K}
 =
 \sum_{K \in \mathcal T_h}
 \left[
 \left(\nabla \boldsymbol{\varphi}_i, \mathbf{F}(\mathbf{w})\right)_{K}
 - \left<\boldsymbol{\varphi}_i,
 \mathbf{n} \cdot \widehat{\mathbf{F}}(\mathbf{w})\right>_{\partial K} +
 \left(\boldsymbol{\varphi}_i,\mathbf{G}(\mathbf w)\right)_{K}
 \right],
\]

where $\boldsymbol{\varphi}_i$ runs through all basis functions from 1 to $n_\text{dofs}$.

We now denote by $\mathcal M$ the mass matrix with entries $\mathcal M_{ij} = \sum_{K} \left(\boldsymbol{\varphi}_i, \boldsymbol{\varphi}_j\right)_{K}$.
/usr/share/doc/packages/dealii/doxygen/deal.II/step_68.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_68.html	2024-11-15 06:44:32.167696031 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_68.html	2024-11-15 06:44:32.167696031 +0000
@@ -153,17 +153,17 @@

          Introduction

          Simulation of the motion of massless tracer particles in a vortical flow

Particles play an important part in numerical models for a large number of applications. Particles are routinely used as massless tracers to visualize the dynamics of a transient flow. They can also play an intrinsic role as part of a more complex finite element model, as is the case for the Particle-In-Cell (PIC) method [GLHPW2018], or they can even be used to simulate the motion of granular matter, as in the Discrete Element Method (DEM) [Blais2019]. In the case of DEM, the resulting model is not related to the finite element method anymore, but just leads to a system of ordinary differential equations which describes the motion of the particles and the dynamics of their collisions. All of these models can be built using deal.II's particle handling capabilities.

In the present step, we use particles as massless tracers to illustrate the dynamics of a vortical flow. Since the particles are massless tracers, the position of each particle $i$ is described by the following ordinary differential equation (ODE):

\[
 \frac{d \textbf{x}_i}{dt} = \textbf{u}(\textbf{x}_i)
\]

where $\textbf{x}_i$ is the position of particle $i$ and $\textbf{u}(\textbf{x}_i)$ the flow velocity at its position. In the present step, this ODE is solved using the explicit Euler method. The resulting scheme is:

\[
 \textbf{x}_{i}^{n+1} = \textbf{x}_{i}^{n} + \Delta t \; \textbf{u}(\textbf{x}_{i}^{n})
\]
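
As a one-line illustration of this update (the function name is an assumption; the velocity is evaluated at the old position):

  // x^{n+1} = x^n + dt * u(x^n)
  #include <deal.II/base/point.h>
  #include <deal.II/base/tensor.h>

  template <int dim>
  dealii::Point<dim> euler_step(const dealii::Point<dim>     &x_n,
                                const dealii::Tensor<1, dim> &u_at_x_n,
                                const double                  dt)
  {
    return x_n + dt * u_at_x_n;
  }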

where $\textbf{x}_{i}^{n+1}$ and $\textbf{x}_{i}^{n}$ are the positions of particle $i$ at time $t+\Delta t$ and $t$, respectively, and where $\Delta t$ is the time step. In the present step, the velocity at the location of particles is obtained in two different fashions:

            • By evaluating the velocity function at the location of the particles;
            • By evaluating the velocity function on a background triangulation and, using a finite element support, interpolating at the position of the particle.
            @@ -189,17 +189,17 @@

            In this section we only discussed the particle-specific challenges in distributed computation. Parallel challenges that particles share with finite-element solutions (parallel output, data transfer during mesh refinement) can be addressed with the solutions found for finite-element problems already discussed in other examples.

            The testcase

            In the present step, we use particles as massless tracers to illustrate the dynamics of a particular vortical flow: the Rayleigh–Kothe vortex. This flow pattern is generally used as a complex test case for interface tracking methods (e.g., volume-of-fluid and level set approaches) since it leads to strong rotation and elongation of the fluid [Blais2013].

The stream function $\Psi$ of this Rayleigh–Kothe vortex is defined as:

\[
 \Psi = \frac{1}{\pi} \sin^2 (\pi x) \sin^2 (\pi y) \cos \left( \pi \frac{t}{T} \right)
\]

where $T$ is half the period of the flow. The velocity profile in 2D ($\textbf{u}=[u,v]^T$) is:

\begin{eqnarray*}
   u &=& -\frac{\partial\Psi}{\partial y} = -2 \sin^2 (\pi x) \sin (\pi y) \cos (\pi y) \cos \left( \pi \frac{t}{T} \right)\\
   v &=& \frac{\partial\Psi}{\partial x} = 2 \cos(\pi x) \sin(\pi x) \sin^2 (\pi y) \cos \left( \pi \frac{t}{T} \right)
\end{eqnarray*}
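
A plain transcription of these formulas into code could look as follows (function and parameter names are illustrative assumptions):

  // 2D Rayleigh-Kothe velocity: u = -dPsi/dy, v = dPsi/dx.
  #include <deal.II/base/numbers.h>
  #include <cmath>

  void vortex_velocity(const double x, const double y,
                       const double t, const double T,
                       double &u, double &v)
  {
    const double pi     = dealii::numbers::PI;
    const double factor = std::cos(pi * t / T);
    u = -2.0 * std::sin(pi * x) * std::sin(pi * x) * std::sin(pi * y) *
        std::cos(pi * y) * factor;
    v = 2.0 * std::cos(pi * x) * std::sin(pi * x) * std::sin(pi * y) *
        std::sin(pi * y) * factor;
  }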

            The velocity profile is illustrated in the following animation:

/usr/share/doc/packages/dealii/doxygen/deal.II/step_69.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_69.html 2024-11-15 06:44:32.279697032 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_69.html 2024-11-15 06:44:32.279697032 +0000 @@ -183,7 +183,7 @@

where $\mathbb{I} \in \mathbb{R}^{d \times d}$ is the identity matrix and $\otimes$ denotes the tensor product. Here, we have introduced the pressure $p$ that, in general, is defined by a closed-form equation of state. In this tutorial we limit the discussion to the class of polytropic ideal gases for which the pressure is given by

\begin{align*}
 p = p(\textbf{u}) := (\gamma -1) \Big(E -
 \tfrac{|\textbf{m}|^2}{2\,\rho}
 \Big),
\end{align*}
@@ -205,7 +205,7 @@
 - {\epsilon} \Delta \mathbf{u}^{\epsilon} = 0.
 \end{align}

Such solutions, which are understood as the solution recovered in the zero-viscosity limit, are often referred to as viscosity solutions. (This is because, physically, $\epsilon$ can be understood as related to the viscosity of the fluid, i.e., a quantity that indicates the amount of friction neighboring gas particles moving at different speeds exert on each other. The Euler equations themselves are derived under the assumption of no friction, but can physically be expected to describe the limiting case of vanishing friction or viscosity.) Global existence and uniqueness of such solutions is an open issue. However, we know at least that if such viscosity solutions exist they have to satisfy the constraint $\textbf{u}(\mathbf{x},t) \in \mathcal{B}$ for all $\mathbf{x} \in \Omega$ and $t \geq 0$ where

\begin{align}
  \mathcal{B} = \big\{ \textbf{u} =
  [\rho, \textbf{m}^\top,E]^{\top} \in \mathbb{R}^{d+2} \, \big | \
  \rho > 0, \;
  E - \tfrac{|\textbf{m}|^2}{2\,\rho} > 0, \;
  s(\textbf{u}) \geq \min_{\mathbf{x} \in \Omega} s(\textbf{u}_0(\mathbf{x}))
  \big\}
\end{align}

            We will refer to $\mathcal{B}$ as the invariant set of Euler's equations. In other words, a state $\mathbf{u}(\mathbf{x},t)\in\mathcal{B}$ obeys positivity of the density, positivity of the internal energy, and a local minimum principle on the specific entropy. This condition is a simplified version of a class of pointwise stability constraints satisfied by the exact (viscosity) solution. By pointwise we mean that the constraint has to be satisfied at every point of the domain, not just in an averaged (integral, or high order moments) sense.

In the context of a numerical approximation, a violation of such a constraint has dire consequences: it almost surely leads to catastrophic failure of the numerical scheme, loss of hyperbolicity, and, overall, loss of well-posedness of the (discrete) problem. It would also mean that we have computed something that can not be interpreted physically. (For example, what are we to make of a computed solution with a negative density?) In the following we will formulate a scheme that ensures that the discrete approximation of $\mathbf{u}(\mathbf{x},t)$ remains in $\mathcal{B}$.

            Variational versus collocation-type discretizations

            Following step-9, step-12, step-33, and step-67, at this point it might look tempting to base a discretization of Euler's equations on a (semi-discrete) variational formulation:

@@ -268,11 +268,11 @@

where

• $m_i \dealcoloneq \int_{\Omega} \phi_i \, \mathrm{d}\mathbf{x}$ is the lumped mass matrix
• $\tau$ is the time step size
• $\mathbf{c}_{ij} \dealcoloneq \int_{\Omega} \nabla\phi_j\phi_i \, \mathrm{d}\mathbf{x}$ (note that $\mathbf{c}_{ij}\in \mathbb{R}^d$) is a vector-valued matrix that was used to approximate the divergence of the flux in a weak sense.
• $\mathcal{I}(i) \dealcoloneq \{j \in \mathcal{V} \ | \ \mathbf{c}_{ij} \not \equiv \boldsymbol{0}\} \cup \{i\}$ is the adjacency list containing all degrees of freedom coupling to the index $i$. In other words, $\mathcal{I}(i)$ contains all nonzero column indices for row index $i$. $\mathcal{I}(i)$ will also be called a "stencil".
• $\mathbb{f}(\mathbf{U}_j^{n})$ is the flux $\mathbb{f}$ of the hyperbolic system evaluated for the state $\mathbf{U}_j^{n}$ associated with support point $\mathbf{x}_j$.
• $d_{ij} \dealcoloneq \max \{ \lambda_{\text{max}} (\mathbf{U}_i^{n},\mathbf{U}_j^{n}, \textbf{n}_{ij}), \lambda_{\text{max}} (\mathbf{U}_j^{n}, \mathbf{U}_i^{n}, \textbf{n}_{ji}) \} \|\mathbf{c}_{ij}\|$, which will constitute the bulk of the computational cost.

Consider the following pseudo-code, illustrating a possible straightforward strategy for computing the solution $\textbf{U}^{n+1}$ at a new time $t_{n+1} = t_n + \tau_n$ given a known state $\textbf{U}^{n}$ at time $t_n$:

          \begin{align*}
          &\textbf{for } i \in \mathcal{V} \\
          &\ \ \ \  \{\mathbf{c}_{ij}\}_{j \in \mathcal{I}(i)} \leftarrow
          \mathtt{gather\_cij\_vectors}\,(\textbf{c}, \mathcal{I}(i)) \\
          &\ \ \ \  \{\textbf{U}_j^n\}_{j \in \mathcal{I}(i)} \leftarrow
          \mathtt{gather\_state\_vectors}\,(\textbf{U}^n, \mathcal{I}(i)) \\
          &\ \ \ \  \textbf{U}_i^{n+1} \leftarrow \mathbf{U}_i^{n} - \frac{\tau_n}{m_i}
          \sum_{j \in \mathcal{I}(i)} \Big( \mathbb{f}(\mathbf{U}_j^{n}) \cdot \mathbf{c}_{ij}
          - d_{ij}\,\big(\mathbf{U}_j^{n} - \mathbf{U}_i^{n}\big) \Big) \\
          &\ \ \ \  \mathtt{scatter\_updated\_state}\,(\textbf{U}_i^{n+1})
          \end{align*}

        • $\mathtt{gather\_cij\_vectors}$, $\mathtt{gather\_state\_vectors}$, and $\mathtt{scatter\_updated\_state}$ are hypothetical implementations that either collect (from) or write (into) global matrices and vectors.

        • If we assume a Cartesian mesh in two space dimensions, first-order polynomial space $\mathbb{Q}^1$, and that $\mathbf{x}_i$ is an interior node (i.e. $\mathbf{x}_i$ is not on the boundary of the domain), then $\{\textbf{U}_j^n\}_{j \in \mathcal{I}(i)}$ should contain nine state vector elements (i.e. all the states in the patch/macro element associated with the shape function $\phi_i$). This is one of the major differences with the usual cell-based loop, where the gather functionality (encoded in FEValuesBase<dim, spacedim>.get_function_values() in the case of deal.II) only collects values for the local cell (just a subset of the patch).
      The actual implementation will deviate from the above code in one key aspect: the time-step size $\tau$ has to be chosen subject to a CFL condition

      \begin{align*}
        \tau_n = c_{\text{cfl}}\,\min_{
        i\in\mathcal{V}}\left(\frac{m_i}{-2\,d_{ii}^{n}}\right),
      \end{align*}
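      In code, this condition amounts to a reduction over the diagonal entries $d_{ii}^{n}$, first over the locally owned rows and then over all MPI ranks. A minimal sketch, not the tutorial's actual implementation (the names n_locally_owned, lumped_mass_matrix, cfl_update, and mpi_communicator are assumptions):

        double tau_max = std::numeric_limits<double>::max();
        for (unsigned int i = 0; i < n_locally_owned; ++i)
          tau_max = std::min(tau_max,
                             lumped_mass_matrix.diag_element(i) /
                               (-2. * dij_matrix.diag_element(i)));
        // reduce over all MPI ranks and apply the safety factor c_cfl:
        const double tau_n =
          cfl_update * Utilities::MPI::min(tau_max, mpi_communicator);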

      where $p_i$ is the pressure at the nodes that lie at the boundary. Clearly $\boldsymbol{(3)}$ is the discrete counterpart of $\boldsymbol{(2)}$. The proof of identity $\boldsymbol{(3)}$ is omitted, but we briefly mention that it hinges on the definition of the nodal normal $\widehat{\boldsymbol{\nu}}_i$ provided in $\boldsymbol{(1)}$. We also note that this enforcement of reflecting boundary conditions is different from the one originally advanced in [GuermondEtAl2018].

      The commented program

      Include files

      The set of include files is quite standard. The most intriguing part is the fact that we will rely solely on deal.II data structures for MPI parallelization, in particular parallel::distributed::Triangulation and LinearAlgebra::distributed::Vector included through distributed/tria.h and lac/la_parallel_vector.h. Instead of a Trilinos- or PETSc-specific matrix class, we will use a non-distributed SparseMatrix (lac/sparse_matrix.h) to store the local part of the $\mathbf{c}_{ij}$, $\mathbf{n}_{ij}$ and $d_{ij}$ matrices.
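      In code, the headers just mentioned amount to the following lines (a condensed excerpt, not the program's full include list):

        #include <deal.II/distributed/tria.h>       // parallel::distributed::Triangulation
        #include <deal.II/lac/la_parallel_vector.h> // LinearAlgebra::distributed::Vector
        #include <deal.II/lac/sparse_matrix.h>      // serial SparseMatrix for c_ij, n_ij, d_ij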


      The TimeStepping class

      With the OfflineData and ProblemDescription classes at hand we can now implement the explicit time-stepping scheme that was introduced in the discussion above. The main method of the TimeStepping class is make_one_step(vector_type &U, const double t), which takes a reference to a state vector U and a time point t (as input arguments), computes the updated solution, stores it in the vector temp, swaps its contents with the vector U, and finally returns the chosen step-size $\tau$.

      The other important method is prepare() which primarily sets the proper partition and sparsity pattern for the temporary vector temp and the matrix dij_matrix respectively.

        template <int dim>
        class TimeStepping : public ParameterAcceptor
        return result;
        }
       
      gather() (first interface): this first function signature, having three input arguments, will be used to retrieve the individual components (i,l) of a matrix. The functionality of gather_get_entry() and gather() is very much the same, but their context is different: the function gather() does not rely on an iterator (that actually knows the value pointed to) but rather on the indices (i,l) of the entry in order to retrieve its actual value. We should expect gather() to be slightly more expensive than gather_get_entry(). The use of gather() will be limited to the task of computing the algebraic viscosity $d_{ij}$ in the particular case when both $i$ and $j$ lie at the boundary.


      Note
      The reader should be aware that accessing an arbitrary (i,l) entry of a matrix (say for instance Trilinos or PETSc matrices) is in general unacceptably expensive. Here is where we might want to keep an eye on complexity: we want this operation to have constant complexity, which is the case of the current implementation using deal.II matrices.
        template <std::size_t k>
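        // (The body is cut off in this excerpt; a plausible sketch of the
        // three-argument gather() described above, assuming the vector-valued
        // matrix is stored as one SparseMatrix per component, as is done for
        // cij_matrix -- not necessarily the program's exact signature:)
        Tensor<1, k> gather(const std::array<SparseMatrix<double>, k> &c_ij,
                            const unsigned int i,
                            const unsigned int l)
        {
          Tensor<1, k> result;
          for (unsigned int d = 0; d < k; ++d)
            result[d] = c_ij[d](i, l); // entry lookup within row i, cheap for
                                       // the short rows of our stencils
          return result;
        }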
      For reference, the colored-iterator variant of WorkStream used here has the interface:

      void run(const std::vector< std::vector< Iterator > > &colored_iterators, Worker worker, Copier copier, const ScratchData &sample_scratch_data, const CopyData &sample_copy_data, const unsigned int queue_length=2 *MultithreadInfo::n_threads(), const unsigned int chunk_size=8)

      At this point in time we are done with the computation of $m_i$ and $\mathbf{c}_{ij}$, but so far the matrix nij_matrix contains just a copy of the matrix cij_matrix. That's not what we really want: we have to normalize its entries. In addition, we have not filled the entries of the matrix norm_matrix and the vectors stored in the map OfflineData<dim>::BoundaryNormalMap are not normalized.

      In principle, this is just offline data; it doesn't make much sense to over-optimize its computation, since the cost will be amortized over the many time steps that we are going to use. However, computing/storing the entries of the matrix norm_matrix and the normalization of nij_matrix are a perfect opportunity to illustrate thread-parallel node loops:

      • we want to visit every node $i$ in the mesh/sparsity graph,
      • and for every such node we want to visit every $j$ such that $\mathbf{c}_{ij} \not \equiv 0$.

      From an algebraic point of view, this is equivalent to visiting every row in the matrix and, for each one of these rows, executing a loop over the columns. Node loops are a core theme of this tutorial step (see the pseudo-code in the introduction) and will repeat over and over again. That's why this is the right time to introduce them.

      We have the thread parallelization capability parallel::apply_to_subranges() that is somewhat more general than the WorkStream framework. In particular, parallel::apply_to_subranges() can be used for our node loops. This functionality requires four input arguments, which we explain in detail (for the specific case of our thread-parallel node loops):
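      As an illustration of the pattern, here is a minimal sketch (the names indices and on_subranges are assumptions; the worker receives the boundaries of the subrange of node indices it is responsible for):

        const auto on_subranges = [&](const auto it_begin, const auto it_end) {
          for (auto it = it_begin; it != it_end; ++it)
            {
              const unsigned int row = *it; // one node of the sparsity graph
              // ... work on all nonzero columns of this row ...
            }
        };
        parallel::apply_to_subranges(indices.begin(),
                                     indices.end(),
                                     on_subranges,
                                     /*grainsize =*/4096);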

          const auto perpendicular_m = m - projected_U[1] * n_ij;
          projected_U[2] = U[1 + dim] - 0.5 * perpendicular_m.norm_square() / U[0];
         
        We return the 1d state in primitive variables instead of conserved quantities. The return array consists of density $\rho$, velocity $u$, pressure $p$ and local speed of sound $a$:


          return {{projected_U[0],
          projected_U[1] / projected_U[0],
          ProblemDescription<1>::pressure(projected_U),
          initial_direction /= initial_direction.norm();
         

        Next, we implement the initial_state function object with a lambda function computing a uniform flow field. For this we have to translate a given primitive 1d state (density $\rho$, velocity $u$, and pressure $p$) into a conserved n-dimensional state (density $\rho$, momentum $\mathbf{m}$, and total energy $E$).


          initial_state = [this](const Point<dim> & /*point*/, double /*t*/) {
          const double rho = initial_1d_state[0];
          const double u = initial_1d_state[1];
          dij_matrix.reinit(offline_data->sparsity_pattern);
          }
         
        It is now time to implement the forward Euler step. Given a writable reference to the old state U at time $t$, we update the state U in place and return the chosen time-step size. We first declare a number of read-only references to various variables and data structures. We do this mainly to have shorter variable names (e.g., sparsity instead of offline_data->sparsity_pattern).


          template <int dim>
          double TimeStepping<dim>::make_one_step(vector_type &U, const double t)
          {
          {
          const auto j = jt->column();
         
        We only compute $d_{ij}$ if $j < i$ (lower-triangular entries) and later copy the values over to $d_{ji}$.


          if (j >= i)
          continue;
         
          schlieren.reinit(offline_data->partitioner);
          }
         
        We now discuss the implementation of the class member SchlierenPostprocessor<dim>::compute_schlieren(), which basically takes a component of the state vector U and computes the Schlieren indicator for that component (the formula of the Schlieren indicator can be found just before the declaration of the class SchlierenPostprocessor). We start by noting that this formula requires the "nodal gradients" $\nabla r_j$. However, nodal values of gradients are not defined for $\mathcal{C}^0$ finite element functions. More generally, pointwise values of gradients are not defined for $W^{1,p}(\Omega)$ functions. The simplest technique we can use to recover gradients at nodes is weighted averaging, i.e.


        \[ \nabla r_i \dealcoloneq \frac{1}{\int_{S_i} \omega_i(\mathbf{x}) \,
           \mathrm{d}\mathbf{x}}
           \int_{S_i} r_h(\mathbf{x}) \omega_i(\mathbf{x}) \, \mathrm{d}\mathbf{x}
           \ \ \ \ \ \mathbf{(*)} \]

        where $S_i$ is the support of the shape function $\phi_i$, and $\omega_i(\mathbf{x})$ is the weight. The weight could be any positive function such as $\omega_i(\mathbf{x}) \equiv 1$ (that would allow us to recover the usual notion of mean value). But as usual, the goal is to reuse the off-line data as much as possible. In this sense, the most natural choice of weight is $\omega_i = \phi_i$. Inserting this choice of weight and the expansion $r_h(\mathbf{x}) = \sum_{j \in \mathcal{V}} r_j \phi_j(\mathbf{x})$ into $\mathbf{(*)}$ we get:

        \[ \nabla r_i \dealcoloneq \frac{1}{m_i} \sum_{j \in \mathcal{I}(i)} r_j
           \mathbf{c}_{ij} \ \ \ \ \ \mathbf{(**)} \, . \]

        Using this last formula we can recover averaged nodal gradients without resorting to any form of quadrature. This idea aligns quite well with the whole spirit of edge-based (or algebraic) schemes, where we want to operate on matrices and vectors as directly as possible, avoiding by all means assembly of bilinear forms, cell loops, quadrature, or any other intermediate construct/operation between the input arguments (the state from the previous time step) and the actual matrices and vectors required to compute the update.
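        In code, formula $\mathbf{(**)}$ is just a loop over one row of the sparsity graph. A minimal sketch (the names r, sparsity, cij_matrix, and lumped_mass_matrix are assumptions borrowed from the surrounding discussion):

          Tensor<1, dim> gradient; // averaged nodal gradient of r at node i
          for (auto jt = sparsity.begin(i); jt != sparsity.end(i); ++jt)
            {
              const auto j = jt->column();
              for (unsigned int d = 0; d < dim; ++d)
                gradient[d] += cij_matrix[d](i, j) * r(j); // r_j * c_ij
            }
          gradient /= lumped_mass_matrix.diag_element(i); // divide by m_i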

        The second thing to note is that we have to compute global minimum and maximum $\max_j |\nabla r_j|$ and $\min_j |\nabla r_j|$. Following the same ideas used to compute the time step size in the class member TimeStepping<dim>::step() we define $\max_j |\nabla r_j|$ and $\min_j |\nabla r_j|$ as atomic doubles in order to resolve any conflicts between threads. As usual, we use Utilities::MPI::max() and Utilities::MPI::min() to find the global maximum/minimum among all MPI processes.
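        The pattern sketched below illustrates this two-level reduction for the maximum: an atomic compare-and-exchange across threads followed by an MPI reduction across ranks. It is an assumption-laden sketch (gradient_max, candidate, and mpi_communicator are hypothetical names), not the tutorial's exact code:

          std::atomic<double> gradient_max{0.};
          // inside the thread-parallel worker, publish a candidate maximum:
          double current = gradient_max.load();
          while (current < candidate &&
                 !gradient_max.compare_exchange_weak(current, candidate))
            ; // retry: `current` is refreshed on each failed exchange
          // once all threads are done, reduce over the MPI ranks:
          const double global_max =
            Utilities::MPI::max(gradient_max.load(), mpi_communicator);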

        Finally, it is not possible to compute the Schlieren indicator in a single loop over all nodes. The entire operation requires two loops over nodes:

        • The first loop computes $|\nabla r_i|$ for all $i \in \mathcal{V}$ in the mesh, and the bounds $\max_j |\nabla r_j|$ and $\min_j |\nabla r_j|$.
        • The second loop finally computes the Schlieren indicator using the formula

        \[ \text{schlieren}[i] = e^{\beta \frac{ |\nabla r_i|
           - \min_j |\nabla r_j| }{\max_j |\nabla r_j| - \min_j |\nabla r_j| } }
           \, . \]

        This means that we will have to define two workers on_subranges for each one of these stages.

          template <int dim>
        /usr/share/doc/packages/dealii/doxygen/deal.II/step_7.html differs (HTML document, UTF-8 Unicode text, with very long lines)

        Verification of correctness

        There has probably never been a non-trivial finite element program that worked right from the start. It is therefore necessary to find ways to verify whether a computed solution is correct or not. Usually, this is done by choosing the set-up of a simulation in such a way that we know the exact continuous solution and evaluate the difference between continuous and computed discrete solution. If this difference converges to zero with the right order of convergence, this is already a good indication of correctness, although there may be other sources of error persisting which have only a small contribution to the total error or are of higher order. In the context of finite element simulations, this technique of picking the solution by choosing appropriate right hand sides and boundary conditions is often called the Method of Manufactured Solution. (We will come back to how exactly we construct the solution in this method below, after discussing the equation we want to solve.)

        In this example, we will not go into the theories of systematic software verification, which is a complicated problem in general. Rather, we will demonstrate the tools which deal.II can offer in this respect. This is basically centered around the functionality of a single function, VectorTools::integrate_difference(). This function computes the difference between a given continuous function and a finite element field in various norms on each cell. Of course, like with any other integral, we can only evaluate these norms using quadrature formulas; the choice of the right quadrature formula is therefore crucial to the accurate evaluation of the error. This holds in particular for the $L_\infty$ norm, where we evaluate the maximal deviation of numerical and exact solution only at the quadrature points; one should then not try to use a quadrature rule whose evaluation occurs only at points where super-convergence might occur, such as the Gauss points of the lowest-order Gauss quadrature formula for which the integrals in the assembly of the matrix are correct (e.g., for linear elements, do not use the QGauss(2) quadrature formula). In fact, this is generally good advice also for the other norms: if your quadrature points are fortuitously chosen at locations where the error happens to be particularly small due to superconvergence, the computed error will look like it is much smaller than it really is and may even suggest a higher convergence order. Consequently, we will choose a different quadrature formula for the integration of these error norms than for the assembly of the linear system.

        The function VectorTools::integrate_difference() evaluates the desired norm on each cell $K$ of the triangulation and returns a vector which holds these values for each cell. From the local values, we can then obtain the global error. For example, if the vector $\mathbf e$ with element $e_K$ for all cells $K$ contains the local $L_2$ norms $\|u-u_h\|_K$, then

        \[
           E = \| {\mathbf e} \| = \left( \sum_K e_K^2 \right)^{1/2}
        \]

        is the global $L_2$ error $E=\|u-u_h\|_\Omega$.
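        In deal.II this two-step procedure looks roughly as follows; this is a condensed sketch in which Solution<dim> stands for the exact-solution class used by the program, and the quadrature choice is subject to the caveats discussed above:

          Vector<float> difference_per_cell(triangulation.n_active_cells());
          VectorTools::integrate_difference(dof_handler,
                                            solution,
                                            Solution<dim>(),
                                            difference_per_cell,
                                            QGauss<dim>(fe->degree + 1),
                                            VectorTools::L2_norm);
          const double L2_error =
            VectorTools::compute_global_error(triangulation,
                                              difference_per_cell,
                                              VectorTools::L2_norm);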

        In the program, we will show how to evaluate and use these quantities, and we will monitor their values under mesh refinement. Of course, we have to choose the problem at hand such that we can explicitly state the solution and its derivatives, but since we want to evaluate the correctness of the program, this is only reasonable. If we know that the program produces the correct solution for one (or, if one wants to be really sure: many) specifically chosen right hand sides, we can be rather confident that it will also compute the correct solution for problems where we don't know the exact values.

        In addition to simply computing these quantities, we will show how to generate nicely formatted tables from the data generated by this program that automatically computes convergence rates etc. In addition, we will compare different strategies for mesh refinement.

        Non-homogeneous Neumann boundary conditions

        The second, totally unrelated, subject of this example program is the use of non-homogeneous boundary conditions. These are included into the variational form using boundary integrals which we have to evaluate numerically when assembling the right hand side vector.

        Before we go into programming, let's have a brief look at the mathematical formulation. The equation that we want to solve here is the Helmholtz equation "with the nice sign":

        \[
           -\Delta u + \alpha u = f,
        \]

        on the square $[-1,1]^2$ with $\alpha=1$, augmented by Dirichlet boundary conditions

        \[
           u = g_1
        \]

        on some part $\Gamma_1$ of the boundary $\Gamma$, and Neumann conditions

        \[
           {\mathbf n}\cdot \nabla u = g_2
        \]

        on the rest $\Gamma_2 = \Gamma \backslash \Gamma_1$. In our particular testcase, we will use $\Gamma_1=\Gamma \cap\{\{x=1\} \cup \{y=1\}\}$. (We say that this equation has the "nice sign" because the operator $-\Delta + \alpha I$ with the identity $I$ and $\alpha>0$ is a positive definite operator; the equation with the "bad sign" is $-\Delta u - \alpha u$ and results from modeling time-harmonic processes. For the equation with the "bad sign", the operator $-\Delta-\alpha I$ is not positive definite if $\alpha>0$ is large, and this leads to all sorts of issues we need not discuss here. The operator may also not be invertible – i.e., the equation does not have a unique solution – if $\alpha$ happens to be one of the eigenvalues of $-\Delta$.)

        Using the above definitions, we can state the weak formulation of the equation, which reads: find $u\in H^1_g=\{v\in H^1: v|_{\Gamma_1}=g_1\}$ such that

        \[
           {(\nabla v, \nabla u)}_\Omega + {(v,u)}_\Omega
           =
           {(v,f)}_\Omega + {(v,g_2)}_{\Gamma_2}
        \]

        holds for all appropriate test functions $v$.

        Since the generation of the domain integrals has been shown in previous examples several times, only the generation of the contour integral is of interest here. It basically works along the following lines: for domain integrals we have the FEValues class that provides values and gradients of the shape functions, as well as Jacobian determinants and other information at specified quadrature points in the cell; likewise, there is a class FEFaceValues that performs these tasks for integrations on faces of cells. One provides it with a quadrature formula for a manifold with dimension one less than the dimension of the domain, and the cell and the number of its face on which we want to perform the integration. The class will then compute the values, gradients, normal vectors, weights, etc. at the quadrature points on this face, which we can then use in the same way as for the domain integrals. The details of how this is done are shown in the following program.
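        As a rough sketch of that pattern (the names cell_rhs and g2 are hypothetical, and the boundary id used for $\Gamma_2$ is an assumption), the contour-integral contribution to the right-hand side might be assembled like this:

          QGauss<dim - 1>   face_quadrature(fe.degree + 1);
          FEFaceValues<dim> fe_face_values(fe, face_quadrature,
                                           update_values | update_quadrature_points |
                                             update_normal_vectors | update_JxW_values);
          for (const unsigned int face_no : cell->face_indices())
            if (cell->face(face_no)->at_boundary() &&
                cell->face(face_no)->boundary_id() == 1) // hypothetical Neumann id
              {
                fe_face_values.reinit(cell, face_no);
                for (const unsigned int q : fe_face_values.quadrature_point_indices())
                  for (const unsigned int i : fe_face_values.dof_indices())
                    cell_rhs(i) += fe_face_values.shape_value(i, q) *
                                   g2.value(fe_face_values.quadrature_point(q)) *
                                   fe_face_values.JxW(q);
              }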

        The method of manufactured solutions

        Because we want to verify the convergence of our numerical solution $u_h$, we want a setup so that we know the exact solution $u$. This is where the Method of Manufactured Solutions comes in: Let us choose a function

        \[
   \bar u(\mathbf x) =
   \sum_{i=1}^3 \exp\left(-\frac{|\mathbf x-\mathbf x_i|^2}{\sigma^2}\right)
 \]

        where the centers $x_i$ of the exponentials are $\mathbf x_1=(-\frac 12,\frac 12)$, $\mathbf x_2=(-\frac 12,-\frac 12)$, and $\mathbf x_3=(\frac 12,-\frac 12)$, and the half width is set to $\sigma=\frac {1}{8}$. The method of manufactured solution then says: choose

        \begin{align*}
          f &= -\Delta \bar u + \bar u, \\
          g_1 &= \bar u|_{\Gamma_1}, \\
          g_2 &= {\mathbf n}\cdot \nabla\bar u|_{\Gamma_2}.
        \end{align*}

        With this particular choice for $f,g_1,g_2$, the solution of the original problem must necessarily be $u=\bar u$. In other words, by choosing the right hand sides of the equation and the boundary conditions in a particular way, we have manufactured ourselves a problem to which we know the solution – a very useful case given that in all but the very simplest cases, PDEs do not have solutions we can just write down. This then allows us to compute the error of our numerical solution. In the code below, we represent $\bar u$ by the Solution class, and other classes will be used to denote $\bar u|_{\Gamma_1}=g_1$ and ${\mathbf n}\cdot \nabla\bar u|_{\Gamma_2}=g_2$.

        Note
        In principle, you can choose whatever you want for the function $\bar u$ above – here we have simply chosen a sum of three exponentials. In practice, there are a few considerations you want to take into account: (i) The function must be simple enough so that you can compute derivatives of the function with not too much effort, for example in order to determine what $f = -\Delta \bar u + \bar u$ is. Since the derivative of an exponential is relatively straightforward to compute, the choice above satisfies this requirement, whereas a function of the kind $\bar u(\mathbf x) = \text{atan}\left(\|\mathbf x\|^{\|\mathbf x\|}\right)$ would have presented greater difficulties. (ii) You don't want $\bar u$ to be a polynomial of low degree. That is because if you choose the polynomial degree of your finite element sufficiently high, you can exactly represent this $\bar u$ with the numerical solution $u_h$, making the error zero regardless of how coarse or fine the mesh is. Verifying that this is so is a useful step, but it will not allow you to verify the correct order of convergence of $\|u-u_h\|$ as a function of the mesh size $h$ in the general case of arbitrary $f$. (iii) The typical finite element error estimates assume sufficiently smooth solutions, i.e., sufficiently smooth domains, right-hand sides $f$ and boundary conditions. As a consequence, you should choose a smooth solution $\bar u$ – for example, it shouldn't have kinks. (iv) You want a solution whose variations can be resolved on the meshes you consider to test convergence. For example, if you were to choose $\bar u(\mathbf x)=\sin(1000 x_1)\sin(1000 x_2)$, you shouldn't be surprised if you don't observe that the error decreases at the expected rate until your mesh is fine enough to actually resolve the high-frequency oscillations with substantially more than 1,000 mesh cells in each coordinate direction.

        The solution $\bar u$ we choose here satisfies all of these requirements: (i) It is relatively straightforward to differentiate; (ii) it is not a polynomial; (iii) it is smooth; and (iv) it has a length scale of $\sigma=\frac {1}{8}$ which, on the domain $[-1,1]^d$ is relatively straightforward to resolve with 16 or more cells in each coordinate direction.

        A note on good programming practice

        Besides the mathematical topics outlined above, we also want to use this program to illustrate one aspect of good programming practice, namely the use of namespaces. In programming the deal.II library, we have taken great care not to use names for classes and global functions that are overly generic, say f(), sz(), rhs() etc. Furthermore, we have put everything into namespace dealii. But when one writes application programs that aren't meant for others to use, one doesn't always pay this much attention. If you follow the programming style of step-1 through step-6, these functions then end up in the global namespace where, unfortunately, a lot of other stuff also lives (basically everything the C language provides, along with everything you get from the operating system through header files). To make things a bit worse, the designers of the C language were also not always careful in avoiding generic names; for example, the symbols j1, jn are defined in C header files (they denote Bessel functions).
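        The practice advocated here is simply to wrap everything the program defines in its own namespace, along these lines:

          namespace Step7
          {
            using namespace dealii;
            // ... all classes and functions of the program live here,
            // safely out of the global namespace ...
          } // namespace Step7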


        Finally, we compute the maximum norm. Of course, we can't actually compute the true maximum of the error over all points in the domain, but only the maximum over a finite set of evaluation points that, for convenience, we will still call "quadrature points" and represent by an object of type Quadrature even though we do not actually perform any integration.

        There is then the question of what points precisely we want to evaluate at. It turns out that the result we get depends quite sensitively on the "quadrature" points being used. There is also the issue of superconvergence: Finite element solutions are, on some meshes and for polynomial degrees $k\ge 2$, particularly accurate at the node points as well as at Gauss-Lobatto points, much more accurate than at randomly chosen points. (See [Li2019] and the discussion and references in Section 1.2 for more information on this.) In other words, if we are interested in finding the largest difference $u(\mathbf x)-u_h(\mathbf x)$, then we ought to look at points $\mathbf x$ that are specifically not of this "special" kind of points and we should specifically not use QGauss(fe->degree+1) to define where we evaluate. Rather, we use a special quadrature rule that is obtained by iterating the trapezoidal rule by the degree of the finite element times two plus one in each space direction. Note that the constructor of the QIterated class takes a one-dimensional quadrature rule and a number that tells it how often it shall repeat this rule in each space direction.

        Using this special quadrature rule, we can then try to find the maximal error on each cell. Finally, we compute the global L infinity error from the L infinity errors on each cell with a call to VectorTools::compute_global_error.

          const QTrapezoid<1> q_trapez;
          const QIterated<dim> q_iterated(q_trapez, fe->degree * 2 + 1);
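          // A sketch of how the evaluation just described would continue,
          // reusing the per-cell vector from the earlier norm computations:
          VectorTools::integrate_difference(dof_handler,
                                            solution,
                                            Solution<dim>(),
                                            difference_per_cell,
                                            q_iterated,
                                            VectorTools::Linfty_norm);
          const double Linfty_error =
            VectorTools::compute_global_error(triangulation,
                                              difference_per_cell,
                                              VectorTools::Linfty_norm);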
        cycle  cells  dofs   L2        H1        Linfty
            6   3727  17143  2.868e-05 8.498e-03 1.462e-04
            7   7081  32343  1.146e-05 4.360e-03 8.576e-05
            8  13525  60895  3.747e-06 2.123e-03 2.174e-05
        One can see the error reduction upon grid refinement, and for the cases where global refinement was performed, also the convergence rates can be seen. The linear and quadratic convergence rates of Q1 and Q2 elements in the $H^1$ semi-norm can clearly be seen, as are the quadratic and cubic rates in the $L_2$ norm.

        Finally, the program also generated LaTeX versions of the tables (not shown here) that are written into a file in such a way that they could be copy-pasted into a LaTeX document.

        When is the error "small"?

        What we showed above is how to determine the size of the error $\|u-u_h\|$ in a number of different norms. We did this primarily because we were interested in testing that our solutions converge. But from an engineering perspective, the question is often more practical: How fine do I have to make my mesh so that the error is "small enough"? In other words, if in the table above the $H^1$ semi-norm has been reduced to 2.123e-03, is this good enough for me to sign the blueprint and declare that our numerical simulation showed that the bridge is strong enough?

        In practice, we are rarely in this situation because I cannot typically compare the numerical solution $u_h$ against the exact solution $u$ in situations that matter – if I knew $u$, I would not have to compute $u_h$. But even if I could, the question to ask in general is then: 2.123e-03 what? The solution will have physical units, say kg-times-meter-squared, and I'm integrating a function with units square of the above over the domain, and then take the square root. So if the domain is two-dimensional, the units of $\|u-u_h\|_{L_2}$ are kg-times-meter-cubed. The question is then: Is $2.123\times 10^{-3}$ kg-times-meter-cubed small? That depends on what you're trying to simulate: If you're an astronomer used to masses measured in solar masses and distances in light years, then yes, this is a fantastically small number. But if you're doing atomic physics, then no: That's not small, and your error is most certainly not sufficiently small; you need a finer mesh.

        In other words, when we look at these sorts of numbers, we generally need to compare against a "scale". One way to do that is to not look at the absolute error $\|u-u_h\|$ in whatever norm, but at the relative error $\|u-u_h\|/\|u\|$. If this ratio is $10^{-5}$, then you know that on average, the difference between $u$ and $u_h$ is 0.001 per cent – probably small enough for engineering purposes.

        How do we compute $\|u\|$? We just need to do an integration loop over all cells, quadrature points on these cells, and then sum things up and take the square root at the end. But there is a simpler way often used: You can call

        Vector<double> zero_vector (dof_handler.n_dofs());
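        // A plausible continuation (a sketch): integrating the difference
        // between the zero function and the exact solution yields ||u||,
        // which we can then use to scale the absolute errors from above.
        Vector<float> norm_per_cell(triangulation.n_active_cells());
        VectorTools::integrate_difference(dof_handler,
                                          zero_vector,
                                          Solution<dim>(),
                                          norm_per_cell,
                                          QGauss<dim>(fe->degree + 1),
                                          VectorTools::L2_norm);
        const double u_norm =
          VectorTools::compute_global_error(triangulation, norm_per_cell,
                                            VectorTools::L2_norm);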
        Possibilities for extensions

        Higher Order Elements

        Go ahead and run the program with higher order elements ($Q_3$, $Q_4$, ...). You will notice that assertions in several parts of the code will trigger (for example in the generation of the filename for the data output). You might have to address these, but it should not be very hard to get the program to work!

        Convergence Comparison

        Is $Q_1$ or $Q_2$ better? What about adaptive versus global refinement? A (somewhat unfair but typical) metric to compare them is to look at the error as a function of the number of unknowns.

        To see this, create a plot in log-log style with the number of unknowns on the $x$ axis and the $L_2$ error on the $y$ axis. You can add reference lines for $h^2=N^{-1}$ and $h^3=N^{-3/2}$ and check that global and adaptive refinement follow those. If one makes the (not completely unreasonable) assumption that with a good linear solver, the computational effort is proportional to the number of unknowns $N$, then it is clear that an error reduction of ${\cal O}(N^{-3/2})$ is substantially better than a reduction of the form ${\cal O}(N^{-1})$: That is, adaptive refinement gives us the desired error level with less computational work than if we used global refinement. This is not a particularly surprising conclusion, but it's worth checking these sorts of assumptions in practice.

        Of course, a fairer comparison would be to plot runtime (switch to release mode first!) instead of number of unknowns on the $x$ axis. If you plot run time (check out the Timer class!) against the number of unknowns by timing each refinement step, you will notice that the linear system solver we use in this program is not perfect – its run time grows faster than proportional to the linear system size – and picking a better linear solver might be appropriate for this kind of comparison.

        To see how a comparison of this kind could work, take a look at [KronbichlerWall2018] , and specifically Figure 5 that illustrates the error as a function of compute time for a number of polynomial degrees (as well as a number of different ways to discretize the equation used there).

        The plain program

        /* ------------------------------------------------------------------------
        /usr/share/doc/packages/dealii/doxygen/deal.II/step_70.html differs (HTML document, UTF-8 Unicode text, with very long lines)
        Note
        If you use this program as a basis for your own work, please consider citing it in your list of references. The initial version of this work was contributed to the deal.II project by the authors listed in the following citation: 10.5281/zenodo.3829064

        Introduction

        Massively parallel non-matching grid simulations of fluid structure interaction problems

        In this tutorial we consider a mixing problem in the laminar flow regime. Such problems occur in a wide range of applications ranging from chemical engineering to power generation (e.g. turbomachinery). Mixing problems are particularly hard to solve numerically, because they often involve a container (with fixed boundaries, and possibly complex geometries such as baffles), represented by the domain $\Omega$, and one (or more) immersed and rotating impellers (represented by the domain $\Omega^{\text{imp}}$). The domain in which we would like to solve the flow equations is the (time dependent) difference between the two domains, namely: $\Omega\setminus\Omega^{\text{imp}}$.

        For rotating impellers, the use of Arbitrary Lagrangian Eulerian formulations (in which the fluid domain – along with the mesh! – is smoothly deformed to follow the deformations of the immersed solid) is not possible, unless only small times (i.e., small fluid domain deformations) are considered. If one wants to track the evolution of the flow across multiple rotations of the impellers, the resulting deformed grid would simply be too distorted to be useful.

        In this case, a viable alternative strategy would be to use non-matching methods (similarly to what we have done in step-60), where a background fixed grid (that may or may not be locally refined in time to better capture the solid motion) is coupled with a rotating, independent, grid.

        In order to maintain the same notations used in step-60, we use $\Omega$ to denote the domain in ${\mathbb R}^{\text{spacedim}}$ representing the container of both the fluid and the impeller, and we use $\Gamma$ in ${\mathbb R}^{\text{dim}}$ to denote either the full impeller (when its spacedim measure is non-negligible, i.e., when we can represent it as a grid of dimension dim equal to spacedim), a co-dimension one representation of a thin impeller, or just the boundary of the full impeller.

        The domain $\Gamma$ is embedded in $\Omega$ ( $\Gamma \subseteq \Omega$) and it is non-matching: It does not, in general, align with any of the features of the volume mesh. We solve a partial differential equation on $\Omega$, enforcing some conditions on the solution of the problem on the embedded domain $\Gamma$ by some penalization techniques. In the current case, the condition is that the velocity of the fluid at points on $\Gamma$ equal the velocity of the solid impeller at that point.

        The technique we describe here is presented in the literature using one of many names: the immersed finite element method and the fictitious boundary method among others. The main principle is that the discretization of the two grids are kept completely independent. In the present tutorial, this approach is used to solve for the motion of a viscous fluid, described by the Stokes equation, that is agitated by a rigid non-deformable impeller.

        Thus, the equations solved in $\Omega$ are the Stokes equations for a creeping flow (i.e. a flow where $\text{Re}\rightarrow 0$) and a no-slip boundary condition is applied on the moving embedded domain $\Gamma$ associated with the impeller. However, this tutorial could be readily extended to other equations (e.g. the Navier-Stokes equations, linear elasticity equation, etc.). It can be seen as a natural extension of step-60 that enables the solution of large problems using a distributed parallel computing architecture via MPI.

        However, contrary to step-60, the Dirichlet boundary conditions on $\Gamma$ are imposed weakly instead of through the use of Lagrange multipliers, and we concentrate on dealing with the coupling of two fully distributed triangulations (a combination that was not possible in the implementation of step-60).

        There are two interesting scenarios that occur when one wants to enforce conditions on the embedded domain $\Gamma$:

        • The geometrical dimension dim of the embedded domain $\Gamma$ is the same as that of the domain $\Omega$ (spacedim), that is, the spacedim-dimensional measure of $\Gamma$ is not zero. In this case, the imposition of the Dirichlet boundary condition on $\Gamma$ is done through a volumetric penalization. If the applied penalization only depends on the velocity, this is often referred to as $\mathcal{L}^2$ penalization, whereas if the penalization depends on both the velocity and its gradient, it is an $\mathcal{H}^1$ penalization. The case of the $\mathcal{L}^2$ penalization is very similar to a Darcy-type approach. Both $\mathcal{L}^2$ and $\mathcal{H}^1$ penalizations have been analyzed extensively (see, for example, [Angot1999]).
        • The embedded domain $\Gamma$ has an intrinsic dimension dim which is smaller than that of $\Omega$ (spacedim), thus its spacedim-dimensional measure is zero; for example, it is a curve embedded in a two-dimensional domain, or a surface embedded in a three-dimensional domain. This is of course physically impossible, but one may consider very thin sheets of metal moving in a fluid as essentially lower-dimensional if the thickness of the sheet is negligible. In this case, the boundary condition is imposed weakly on $\Gamma$ by applying the Nitsche method (see [Freund1995]).

        Both approaches have very similar requirements and result in highly similar formulations. Thus, we treat them almost in the same way.

        In this tutorial program we are not interested in further details on $\Gamma$: we assume that the dimension dim of the embedded domain is always either equal to, or one less than, the dimension spacedim of the embedding domain $\Omega$.

        We are going to solve the following differential problem: given a sufficiently regular function $g$ on $\Gamma$, find the solution $(\textbf{u},p)$ to

        \begin{eqnarray*}
          -\Delta \mathbf{u} + \nabla p &=& 0, \\
          -\nabla \cdot \mathbf{u} &=& 0, \\
          \mathbf{u} &=& \mathbf{g} \text{ in } \Gamma, \\
          \mathbf{u} &=& 0 \text{ on } \partial\Omega.
        \end{eqnarray*}

        This equation, which we have normalized by scaling the time units in such a way that the viscosity has a numerical value of 1, describes slow, viscous flow such as honey or lava. The main goal of this tutorial is to show how to impose the velocity field condition $\mathbf{u} = \mathbf{g}$ on a non-matching $\Gamma$ in a weak way, using a penalization method. A more extensive discussion of the Stokes problem including body forces, different boundary conditions, and solution strategies can be found in step-22.

        Let us start by considering the Stokes problem alone, in the entire domain $\Omega$. We look for a velocity field $\mathbf{u}$ and a pressure field $p$ that satisfy the Stokes equations with homogeneous boundary conditions on $\partial\Omega$.

        The weak form of the Stokes equations is obtained by first writing them in vector form as

        \begin{eqnarray*}
   \begin{pmatrix}
     -\Delta \textbf{u} + \nabla p \\
     -\textrm{div}\; \textbf{u}
   \end{pmatrix}
   =
   \begin{pmatrix}
     \textbf{0} \\
     0
   \end{pmatrix},
 \end{eqnarray*}

        forming the dot product from the left with a vector-valued test function $\phi = \begin{pmatrix}\textbf{v} \\ q\end{pmatrix}$, and integrating over the domain $\Omega$, yielding the following set of equations:

        \begin{eqnarray*}
   (\mathrm v,
    -\Delta \textbf{u} + \nabla p)_{\Omega}
   &=& 0,
   \\
   (q, \textrm{div}\; \textbf{u})_{\Omega}
   &=&
   0
 \end{eqnarray*}

        which has to hold for all test functions $\phi = \begin{pmatrix}\textbf{v} \\ q\end{pmatrix}$.

        Integrating by parts and exploiting the boundary conditions on $\partial\Omega$, we obtain the following variational problem:

        \begin{eqnarray*}
 (\nabla \textbf{v}, \nabla \textbf{u})_{\Omega} - (\textrm{div}\; \textbf{v}, p)_{\Omega}
  - (q, \textrm{div}\; \textbf{u})_{\Omega}&=& 0
 \end{eqnarray*}

        where $(\cdot, \cdot)_{\Omega}$ represents the $L^2$ scalar product. This is the same variational form used in step-22.

        This variational formulation does not take into account the embedded domain. Contrary to step-60, we do not enforce the constraints on $\textbf{u}$ on $\Gamma$ strongly, but enforce them weakly via a penalization term.

        The analysis of this weak imposition of the boundary condition depends on whether the spacedim-dimensional measure of $\Gamma$ is positive (if dim is equal to spacedim) or zero (if dim is smaller than spacedim). We discuss both scenarios.

        Co-dimension one case

        In this case, we assume that $\Gamma$ is the boundary of the actual impeller, that is, a closed curve embedded in a two-dimensional domain or a closed surface in a three-dimensional domain. The idea of this method starts by considering a weak imposition of the Dirichlet boundary condition on $\Gamma$, following the Nitsche method. This is achieved by using the following modified formulation on the fluid domain, where no strong conditions on the test functions on $\Gamma$ are imposed:

        \begin{multline*}
 (\nabla \textbf{v}, \nabla \textbf{u})_{\Omega} - (\textrm{div}\; \textbf{v}, p)_{\Omega}
  - (q, \textrm{div}\; \textbf{u})_{\Omega}
  - (\textbf{v}, \nabla \textbf{u} \cdot \textbf{n})_{\Gamma} \\
  + (\nabla \textbf{v} \cdot \textbf{n}, \textbf{u})_{\Gamma}
  + \beta (\textbf{v}, \textbf{u})_{\Gamma} \\
  = (\nabla \textbf{v} \cdot \textbf{n}, \textbf{g})_{\Gamma}
  + \beta (\textbf{v},\textbf{g})_{\Gamma}.
 \end{multline*}

        Note the different sign of the first terms on the third and fourth lines. In this case, the stability and consistency conditions become $\beta > 0$. In the symmetric case, the value of $\beta$ depends on $h$, and it is in general chosen such that $\beta = C h^{-1}$ with $h$ a measure of the size of the face being integrated and $C$ a constant such that $1 \leq C \leq 10$. This is as one usually does with the Nitsche penalty method to enforce Dirichlet boundary conditions.
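
        In code, this choice of penalty might look as follows (a minimal sketch; the constant C and the use of the cell diameter as the measure $h$ are illustrative assumptions, not prescriptions from the tutorial):

          // Nitsche penalty scaling as beta = C h^{-1}, with C in [1, 10].
          const double C    = 2.0;                  // assumed user-chosen constant
          const double beta = C / cell->diameter(); // h taken as the cell diameter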

        The non-symmetric approach, on the other hand, is related to how one enforces continuity for the non-symmetric interior penalty method for discontinuous Galerkin methods (the "NIPG" method [Riviere1999]). Even though the non-symmetric case seems advantageous with respect to possible choices of stabilization parameters, we opt for the symmetric discretization, since in this case it can be shown that the dual problem is also consistent, leading to a solution where not only the energy norm of the solution converges with the correct order, but also its $L^2$ norm. Furthermore, the resulting matrix remains symmetric.

        The above formulation works under the assumption that the domain is discretized exactly. However, if the deformation of the impeller is a rigid body motion, it is possible to artificially extend the solution of the Stokes problem inside the impeller itself, since a rigid body motion is also a solution to the Stokes problem. The idea is then to solve the same problem inside $\Omega^{\text{imp}}$, imposing the same boundary conditions on $\Gamma$, using the same penalization technique, and testing with test functions $\mathbf{v}$ which are globally continuous over $\Omega$.

        This results in the following (intermediate) formulation:

        \begin{multline*}
 (\nabla \textbf{v}, \nabla \textbf{u})_{\Omega} - (\textrm{div}\;  \textbf{v}, p)_{\Omega}
  - (q, \textrm{div}\; \textbf{u})_{\Omega} \\
  + 2\beta (\textbf{v},\textbf{u})_{\Gamma}
 =  2\beta (\textbf{v},\textbf{g})_{\Gamma}.
 \end{multline*}

        In step-60, the imposition of the constraint required the addition of new variables in the form of Lagrange multipliers. This is not the case for this tutorial program. The imposition of the boundary condition using Nitsche's method only modifies the system matrix and the right-hand side without adding additional unknowns. However, the velocity vector $\textbf{u}$ on the embedded domain will match the prescribed velocity $\textbf{g}$ only up to a numerical error of the same order as the interpolation error of the finite element method. Furthermore, as in step-60, we still need to integrate over the non-matching embedded grid in order to construct the boundary term necessary to impose the boundary condition over $\Gamma$.

        Co-dimension zero case

        In this case, $\Gamma$ has the same dimension as, but is embedded into, $\Omega$. We can think of this as a thick object moving around in the fluid. In the case of $\mathcal{L}^2$ penalization, the additional penalization term can be interpreted as a Darcy term within $\Gamma$, resulting in:

        \begin{eqnarray*}
 (\nabla \textbf{v}, \nabla \textbf{u})_{\Omega} &-& (\textrm{div}\;  \textbf{v}, p)_{\Omega}
  - (q, \textrm{div}\; \textbf{u})_{\Omega}
  + \beta_1 (\textbf{v}, \textbf{u})_{\Gamma}
  + \beta_2 (\nabla \textbf{v}, \nabla \textbf{u})_{\Gamma} \\
  &=& \beta_1 (\textbf{v}, \textbf{g})_{\Gamma}
 + \beta_2 (\nabla \textbf{v}, \nabla \textbf{g})_{\Gamma}.
 \end{eqnarray*}

        Notice that the $L^2$ penalization (dim equal to spacedim) and the Nitsche penalization (dim equal to spacedim-1) result in the exact same numerical implementation, thanks to the dimension-independent capabilities of deal.II.

        Representation of Ω and Γ

        In this tutorial, both the embedded grid $\Gamma$ and the embedding grid are described using a parallel::distributed::Triangulation. These two triangulations can be built from functions in the GridGenerator namespace or by reading a mesh file produced with another application (e.g. GMSH, see the discussion in step-49). This is slightly more general than what was previously done in step-60.
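
        For the co-dimension zero case (dim equal to spacedim), setting up the two grids might be sketched as follows. This is a minimal sketch with assumed names (fluid_tria, solid_tria, mpi_communicator); either mesh could equally be read from a GMSH file with GridIn, as mentioned above:

          parallel::distributed::Triangulation<spacedim>      fluid_tria(mpi_communicator);
          parallel::distributed::Triangulation<dim, spacedim> solid_tria(mpi_communicator);

          // Fluid domain: a box. Solid domain: a ball standing in for the impeller.
          GridGenerator::hyper_cube(fluid_tria, -1., 1.);
          GridGenerator::hyper_ball(solid_tria);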

        The addition of the immersed boundary method, whether it is in the dim=spacedim or dim<spacedim case, only introduces additional terms in the system matrix and the right-hand side of the system which result from the integration over $\Gamma$. This does not modify the number of variables for which the problem must be solved. The challenge is thus related to the integrals that must be carried out over $\Gamma$.

        Integrating over $\Gamma$ thus requires computing sums of the form

        \[
   \int_{\Gamma} v_j \, d\gamma
   \approx
   \sum_{K\in \Gamma} \sum_{i=1}^{n_q} (v_j \circ F_{K}) (\hat x_i)\, J_K(\hat x_i)\, w_i ,
 \]

        where $F_K$ is the mapping from the reference cell to the cell $K$, $J_K$ is the determinant of its Jacobian, and $\hat x_i$ and $w_i$ are the quadrature points and weights.

        Computing this sum is non-trivial because we have to evaluate $(v_j \circ F_{K})(\hat x_i)$. In general, if $\Gamma$ and $\Omega$ are not aligned, the point $y_i = F_{K}(\hat x_i)$ is completely arbitrary with respect to $\Omega$, and unless we figure out a way to interpolate all basis functions of $V_h(\Omega)$ at an arbitrary point of $\Omega$, we cannot compute the integral needed.

        To evaluate $(v_j \circ F_{K}) (\hat x_i)$, the following steps need to be taken (as shown in the picture below; a code sketch follows the list):

        • For a given cell $K$ in $\Gamma$ compute the real point $y_i \dealcoloneq F_{K} (\hat
-x_i)$, where $x_i$ is one of the quadrature points used for the integral on $K
+x_i)$, where $x_i$ is one of the quadrature points used for the integral on $K
 \subseteq \Gamma$. This is the easy part: FEValues::quadrature_point() gives us the real-space locations of all quadrature points.
        • -
        • Find the cell of $\Omega$ in which $y_i$ lies. We shall call this element $T$.
        • -
        • Find the reference coordinates within $T$ of $y_i$. For this, we need the inverse of the mapping $G_T$ that transforms the reference element $\hat T$ into the element $T$: $\hat y_i = G^{-1}_{T} (y_i)$.
        • -
        • Evaluate the basis function $v_j$ of the $\Omega$ mesh at this point $\hat y_i$. This is, again, relatively simple using FEValues.
        • +
        • Find the cell of $\Omega$ in which $y_i$ lies. We shall call this element $T$.
        • +
        • Find the reference coordinates within $T$ of $y_i$. For this, we need the inverse of the mapping $G_T$ that transforms the reference element $\hat T$ into the element $T$: $\hat y_i = G^{-1}_{T} (y_i)$.
        • +
        • Evaluate the basis function $v_j$ of the $\Omega$ mesh at this point $\hat y_i$. This is, again, relatively simple using FEValues.
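
        In deal.II, these four steps can be sketched roughly as follows. This is a minimal illustration under assumed names (fluid_dof_handler, mapping, fe_values_on_gamma); it glosses over vector-valued elements (for which one would use FiniteElement::shape_value_component()) and over the fact that, on a fully distributed mesh, $y_i$ may belong to another process:

          // Step 1: real-space location of the quadrature point y_i = F_K(x_i).
          const Point<spacedim> y_i = fe_values_on_gamma.quadrature_point(i);

          // Steps 2 and 3: find the cell T of the fluid mesh containing y_i and,
          // at the same time, the reference coordinates G_T^{-1}(y_i).
          const auto cell_and_ref_point =
            GridTools::find_active_cell_around_point(mapping, fluid_dof_handler, y_i);
          const auto           &T       = cell_and_ref_point.first;
          const Point<spacedim> hat_y_i = cell_and_ref_point.second;

          // Step 4: evaluate the basis functions v_j of the Omega mesh at hat_y_i.
          const FiniteElement<spacedim> &fe = fluid_dof_handler.get_fe();
          for (unsigned int j = 0; j < fe.n_dofs_per_cell(); ++j)
            {
              const double v_j_at_y_i = fe.shape_value(j, hat_y_i);
              // ... use v_j_at_y_i (together with the JxW of the Gamma
              //     quadrature point) to assemble the coupling terms on the
              //     rows/columns associated with cell T ...
            }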

        Putting everything together, each boundary term reduces to a sum of the form

        \[
   \sum_{K\in \Gamma} \sum_{i=1}^{n_q} \big(\hat{\textbf{u}}(\hat x_i) (\textbf{v} \circ F_{K}) (\hat x_i) J_K (\hat x_i) w_i \big) .
 \]

        If you followed the discussion above, then you will recall that $\textbf{u}$ and $\textbf{v}$ are shape functions defined on the fluid mesh. The only things defined on the solid mesh are $F_K(\hat x_i)$, the location of a quadrature point on a solid cell that is part of $\Gamma$; $J_K$, the determinant of its Jacobian; and $w_i$, the corresponding quadrature weight.

        The important part to realize is now this: $w_i$ is a property of the quadrature formula and does not change with time. Furthermore, the Jacobian matrix of $F_K$ itself changes as the solid obstacle moves around in the fluid, but because the solid is considered non-deforming (it only translates and rotates, but doesn't dilate), the determinant of the Jacobian remains constant. As a consequence, the product $J_K(\hat x_i) w_i$ (which we typically denote by JxW) remains constant for each quadrature point. So the only thing we need to keep track of are the positions $x_i=F_K(\hat x_i)$ – but these move with the velocity of the solid domain.

        In other words, we don't actually need to keep the solid mesh at all. All we need is the positions $x_i(t)$ and corresponding JxW values. Since both of these properties are point-properties (or point-vectors) that are attached to the solid material, they can be idealized as a set of disconnected infinitesimally small "particles", which carry the required JxW information with the movement of the solid. deal.II has the ability to distribute and store such a set of particles in large-scale parallel computations in the form of the ParticleHandler class (for details on the implementation see [GLHPW2018]), and we will make use of this functionality in this tutorial.

        Thus, the approach taken in this step is as follows:

        This structure is relatively expensive to generate, but must only be generated once per simulation. Once the Particles::ParticleHandler is generated and the required information is attached to the particles, the integrals over $\Gamma$ can be carried out by exploiting the fact that particles are grouped cellwise inside ParticleHandler, allowing us to:

        • Loop over all cells of $\Omega$ that contain at least one particle;
        • loop over all particles in the given cell;
        • compute the integrals and fill the global matrix, as sketched in the code below.
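
        Under the assumption (made here for illustration) that each particle stores its JxW value as its first property, this cellwise loop might be sketched as:

          for (const auto &cell : fluid_tria.active_cell_iterators())
            if (cell->is_locally_owned())
              for (const auto &particle : solid_particle_handler.particles_in_cell(cell))
                {
                  // Reference coordinates of the particle inside this fluid
                  // cell, and the (constant-in-time) product J_K(x_i) w_i
                  // that the particle carries along as it moves.
                  const Point<spacedim> hat_x = particle.get_reference_location();
                  const double          JxW   = particle.get_properties()[0];
                  // ... evaluate the fluid shape functions at hat_x and
                  //     accumulate the penalization terms, weighted by JxW,
                  //     into the global matrix and right-hand side ...
                }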
          angular_velocity.set_time(time);
          }
         
        The remainder of the class consists largely of member variables that describe the details of the simulation and its discretization. The following parameters are about where output should land, the spatial and temporal discretization (the default is the $Q_2\times Q_1$ Taylor-Hood discretization which uses a polynomial degree of 2 for the velocity), and how many time steps should elapse before we generate graphical output again:

          std::string output_directory = ".";
         
          unsigned int velocity_degree = 2;
          Particles::ParticleHandler<spacedim> tracer_particle_handler;
          Particles::ParticleHandler<spacedim> solid_particle_handler;
         
        For every tracer particle, we need to compute the velocity field at its current position, and update its position using a discrete time stepping scheme. We do this using distributed linear algebra objects that store the coordinates of each particle's location or velocity. That is, these vectors have tracer_particle_handler.n_global_particles() * spacedim entries that are partitioned across all processes. (Implicitly, we here make the assumption that the spacedim coordinates of each particle are stored in consecutive entries of the vector.) Thus, we need to determine who the owner of each vector entry is. We set this owner to be equal to the process that generated that particle at time $t=0$. This information is stored for every process in the locally_owned_tracer_particle_coordinates IndexSet.

        Once the particles have been distributed around to match the process that owns the region where the particle lives, we will need read access from that process to the corresponding velocity field. We achieve this by filling a read-only velocity vector field that contains the relevant information in ghost entries. This is achieved using the locally_relevant_tracer_particle_coordinates IndexSet, which keeps track of how things change during the simulation, i.e., it keeps track of where the particles that the current process owns have ended up, and of who owns the particles that ended up in its subdomain.

        While this is not the most efficient strategy, we keep it this way to illustrate how things would work in a real fluid-structure interaction (FSI) problem. If a particle is linked to a specific solid degree of freedom, we are not free to choose who owns it, and we have to communicate this information around. We illustrate this here, and show that the communication pattern is point-to-point, and negligible in terms of the total cost of the algorithm.

        The vectors defined based on these subdivisions are then used to store the particles' velocities (read-only, with ghost entries) and their displacements (read/write, no ghost entries).
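
        For illustration, the locally owned index set described above could be built along the following lines (a sketch with assumed names):

          // Each locally owned particle owns the spacedim consecutive entries
          // [id * spacedim, (id + 1) * spacedim) of the coordinate vectors.
          IndexSet owned_coordinates(tracer_particle_handler.n_global_particles() *
                                     spacedim);
          for (const auto &particle : tracer_particle_handler)
            owned_coordinates.add_range(particle.get_id() * spacedim,
                                        (particle.get_id() + 1) * spacedim);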

         
         

        Solving the linear system

        This function solves the linear system with FGMRES with a block-diagonal preconditioner and an algebraic multigrid (AMG) method for the diagonal blocks. The preconditioner applies a V-cycle to the $(0,0)$ (i.e., the velocity-velocity) block and a CG iteration with the mass matrix for the $(1,1)$ block (which is our approximation to the Schur complement: the pressure mass matrix assembled above).
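
        Conceptually, such a preconditioner can be sketched as a small class whose vmult() treats the two blocks separately. The following is only a sketch with assumed Trilinos-based types and member names, not the tutorial's exact code (which follows):

          class BlockDiagonalPreconditioner
          {
          public:
            void vmult(TrilinosWrappers::MPI::BlockVector       &dst,
                       const TrilinosWrappers::MPI::BlockVector &src) const
            {
              // (0,0) block: one AMG V-cycle on the velocity-velocity matrix.
              amg.vmult(dst.block(0), src.block(0));

              // (1,1) block: an inner CG solve with the pressure mass matrix,
              // which is our approximation of the Schur complement.
              dst.block(1) = 0.;
              SolverControl control(1000, 1e-2 * src.block(1).l2_norm());
              SolverCG<TrilinosWrappers::MPI::Vector> cg(control);
              cg.solve(pressure_mass_matrix, dst.block(1), src.block(1),
                       PreconditionIdentity());
            }

            const TrilinosWrappers::PreconditionAMG &amg;                  // set up elsewhere
            const TrilinosWrappers::SparseMatrix    &pressure_mass_matrix; // assembled above
          };

        An instance of this class is then handed to SolverFGMRES<TrilinosWrappers::MPI::BlockVector>::solve() as the preconditioner argument.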

          template <int dim, int spacedim>
          void StokesImmersedProblem<dim, spacedim>::solve()
          {
        /usr/share/doc/packages/dealii/doxygen/deal.II/step_71.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_71.html 2024-11-15 06:44:32.627700141 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_71.html 2024-11-15 06:44:32.647700319 +0000 @@ -162,7 +162,7 @@

        This program was contributed by Jean-Paul Pelteret.

        Introduction

        The aim of this tutorial is, quite simply, to introduce the fundamentals of both automatic and symbolic differentiation (respectively abbreviated as AD and SD): Ways in which one can, in source code, describe a function $\mathbf f(\mathbf x)$ and automatically also obtain a representation of derivatives $\nabla \mathbf f(\mathbf x)$ (the "Jacobian"), $\nabla^2 \mathbf f(\mathbf x)$ (the "Hessian"), etc., without having to write additional lines of code. Doing this is quite helpful in solving nonlinear or optimization problems where one would like to only describe the nonlinear equation or the objective function in the code, without having to also provide their derivatives (which are necessary for a Newton method for solving a nonlinear problem, or for finding a minimizer).
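
        Before diving in, a toy example may help fix ideas. The following is not one of the AD frameworks wrapped by deal.II, merely a hand-rolled illustration of the forward-mode idea: a "dual number" carries a value and a derivative through every operation, so evaluating $f(x)$ automatically also evaluates $f'(x)$:

          #include <cmath>
          #include <iostream>

          struct Dual
          {
            double val; // f(x)
            double der; // f'(x)
          };

          Dual operator*(const Dual &a, const Dual &b)
          {
            return {a.val * b.val, a.der * b.val + a.val * b.der}; // product rule
          }

          Dual sin(const Dual &a)
          {
            return {std::sin(a.val), std::cos(a.val) * a.der}; // chain rule
          }

          int main()
          {
            const Dual x{2.0, 1.0};    // seed with dx/dx = 1
            const Dual f = x * sin(x); // f(x) = x sin(x)
            std::cout << "f(2)  = " << f.val << '\n'  // x sin(x) at x = 2
                      << "f'(2) = " << f.der << '\n'; // sin(x) + x cos(x) at x = 2
          }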

        Since AD and SD tools are somewhat independent of finite elements and boundary value problems, this tutorial is going to be different to the others that you may have read beforehand. It will focus specifically on how these frameworks work and the principles and thinking behind them, and will forgo looking at them in the direct context of a finite element simulation.

        We will, in fact, look at two different sets of problems that have greatly different levels of complexity, but when framed properly hold sufficient similarity that the same AD and SD frameworks can be leveraged. With these examples the aim is to build up an understanding of the steps that are required to use the AD and SD tools, the differences between them, and hopefully identify where they could immediately be used in order to improve or simplify existing code.

        It's plausible that you're wondering what AD and SD are, in the first place. Well, that question is easy to answer but without context is not very insightful. So we're not going to cover that in this introduction, but will rather defer this until the first introductory example where we lay out the key points as this example unfolds. To complement this, we should mention that the core theory for both frameworks is extensively discussed in the Automatic and symbolic differentiation topic, so it bears little repeating here.


        Thermodynamic principles

        As a prelude to introducing the coupled magneto-mechanical material law that we'll use to model a magneto-active polymer, we'll start with a very concise summary of the salient thermodynamics to which these constitutive laws must subscribe. The basis for the theory, as summarized here, is described in copious detail by Truesdell and Toupin [Truesdell1960a] and Coleman and Noll [Coleman1963a], and follows the logic laid out by Holzapfel [Holzapfel2007a].

        Starting from the first law of thermodynamics, and following a few technical assumptions, it can be shown that the balance between the kinetic plus internal energy rates and the power supplied to the system from external sources is given by the following relationship, which equates the rate of change of the energy in an (arbitrary) volume $V$ on the left and the sum of forces acting on that volume on the right:

\[
   D_{t} \int\limits_{V} \left[
     \frac{1}{2} \rho_{0} \mathbf{v} \cdot \mathbf{v}
     + U^{*}_{0} \right] dV
 = \int\limits_{V} \left[
     \mathbf{P}^{\text{tot}} : \dot{\mathbf{F}}
   + \boldsymbol{\mathbb{H}} \cdot \dot{\boldsymbol{\mathbb{B}}}
   + \mathbb{E} \cdot \dot{\mathbb{D}}
   - D_{t} M^{*}_{0}
   - \nabla_{0} \cdot \mathbf{Q}
   + R_{0} \right] dV .
\]

        Here $D_{t}$ represents the total time derivative, $\rho_{0}$ is the material density as measured in the Lagrangian reference frame, $\mathbf{v}$ is the material velocity and $\mathbf{a}$ its acceleration, $U^{*}_{0}$ is the internal energy per unit reference volume, $\mathbf{P}^{\text{tot}}$ is the total Piola stress tensor and $\dot{\mathbf{F}}$ is the time rate of the deformation gradient tensor, $\boldsymbol{\mathbb{H}}$ and $\boldsymbol{\mathbb{B}}$ are, respectively, the magnetic field vector and the magnetic induction (or magnetic flux density) vector, $\mathbb{E}$ and $\mathbb{D}$ are the electric field vector and electric displacement vector, and $\mathbf{Q}$ and $R_{0}$ represent the referential thermal flux vector and thermal source. The material differential operator is $\nabla_{0} (\bullet) \dealcoloneq \frac{d(\bullet)}{d\mathbf{X}}$, where $\mathbf{X}$ is the material position vector. With some rearrangement of terms, invoking the arbitrariness of the integration volume $V$, the total internal energy density rate $\dot{E}_{0}$ can be identified as

\[
   \dot{E}_{0}
 = \mathbf{P}^{\text{tot}} : \dot{\mathbf{F}}
   + \boldsymbol{\mathbb{H}} \cdot \dot{\boldsymbol{\mathbb{B}}}
   + \mathbb{E} \cdot \dot{\mathbb{D}}
   - \nabla_{0} \cdot \mathbf{Q}
   + R_{0} .
\]

        The total internal energy includes contributions that arise not only due to mechanical deformation (the first term), and thermal fluxes and sources (the fourth and fifth terms), but also due to the intrinsic energy stored in the magnetic and electric fields themselves (the second and third terms, respectively).

        The second law of thermodynamics, also known as the entropy inequality principle, informs us that certain thermodynamic processes are irreversible. After accounting for the total entropy and the rate of entropy input, the Clausius-Duhem inequality can be derived. In local form (and in the material configuration), this reads

\[
   \theta \dot{\eta}_{0}
   - R_{0}
   + \nabla_{0} \cdot \mathbf{Q}
   - \frac{1}{\theta} \nabla_{0} \theta \cdot \mathbf{Q}
   \geq 0 .
\]

        The quantity $\theta$ is the absolute temperature, and $\eta_{0}$ represents the entropy per unit reference volume.

        Using this to replace $R_{0} - \nabla_{0} \cdot \mathbf{Q}$ in the result stemming from the first law of thermodynamics, we now have the relation

\[
   \mathbf{P}^{\text{tot}} : \dot{\mathbf{F}}
   + \boldsymbol{\mathbb{H}} \cdot \dot{\boldsymbol{\mathbb{B}}}
   + \mathbb{E} \cdot \dot{\mathbb{D}}
@@ -223,26 +223,26 @@
   - \dot{E}_{0}
   - \frac{1}{\theta} \nabla_{0} \theta \cdot \mathbf{Q}
   \geq 0 .
\]

        On the basis of Fourier's law, which informs us that heat flows from regions of high temperature to low temperature, the last term is always positive and can be ignored. This renders the local dissipation inequality

\[
   \mathbf{P}^{\text{tot}} : \dot{\mathbf{F}}
   + \boldsymbol{\mathbb{H}} \cdot \dot{\boldsymbol{\mathbb{B}}}
   + \mathbb{E} \cdot \dot{\mathbb{D}}
   - \left[ \dot{E}_{0} - \theta \dot{\eta}_{0}  \right]
   \geq 0 .
\]

        It is postulated [Holzapfel2007a] that the Legendre transformation

\[
   \psi^{*}_{0}
 = \psi^{*}_{0} \left( \mathbf{F}, \boldsymbol{\mathbb{B}}, \mathbb{D}, \theta \right)
 = E_{0} - \theta \eta_{0} ,
\]

        from which we may define the free energy density function $\psi^{*}_{0}$ with the stated parameterization, exists and is valid. Taking the material rate of this equation and substituting it into the local dissipation inequality results in the generic expression

\[
   \mathcal{D}_{\text{int}}
   = \mathbf{P}^{\text{tot}} : \dot{\mathbf{F}}
   + \boldsymbol{\mathbb{H}} \cdot \dot{\boldsymbol{\mathbb{B}}}
@@ -250,104 +250,104 @@
   - \dot{\theta} \eta_{0}
   - \dot{\psi}^{*}_{0} \left( \mathbf{F}, \boldsymbol{\mathbb{B}}, \mathbb{D}, \theta \right)
   \geq 0 .
\]

        Under the assumption of isothermal conditions, and that the electric field does not excite the material in a manner that is considered non-negligible, then this dissipation inequality reduces to

\[
   \mathcal{D}_{\text{int}}
   = \mathbf{P}^{\text{tot}} : \dot{\mathbf{F}}
   + \boldsymbol{\mathbb{H}} \cdot \dot{\boldsymbol{\mathbb{B}}}
   - \dot{\psi}^{*}_{0} \left( \mathbf{F}, \boldsymbol{\mathbb{B}} \right)
   \geq 0 .
\]

        Constitutive laws

        When considering materials that exhibit mechanically dissipative behavior, it can be shown that this can be captured within the dissipation inequality through the augmentation of the material free energy density function with additional parameters that represent internal variables [Holzapfel1996a]. Consequently, we write it as

\[
   \mathcal{D}_{\text{int}}
   = \mathbf{P}^{\text{tot}} : \dot{\mathbf{F}}
   + \boldsymbol{\mathbb{H}} \cdot \dot{\boldsymbol{\mathbb{B}}}
   - \dot{\psi}^{*}_{0} \left( \mathbf{F}, \mathbf{F}_{v}^{i}, \boldsymbol{\mathbb{B}} \right)
   \geq 0 .
\]

        where $\mathbf{F}_{v}^{i} = \mathbf{F}_{v}^{i} \left( t \right)$ represents the internal variable (which acts like a measure of the deformation gradient) associated with the ith mechanical dissipative (viscous) mechanism. As can be inferred from its parameterization, each of these internal parameters is considered to evolve in time. Currently the free energy density function $\psi^{*}_{0}$ is parameterized in terms of the magnetic induction $\boldsymbol{\mathbb{B}}$. This is the natural parameterization that comes as a consequence of the considered balance laws. Should such a class of materials be incorporated within a finite-element model, a certain formulation of the magnetic problem, known as the magnetic vector potential formulation, would need to be adopted. This has its own set of challenges, so where possible the simpler magnetic scalar potential formulation may be preferred. In that case, the magnetic problem needs to be parameterized in terms of the magnetic field $\boldsymbol{\mathbb{H}}$. To make this re-parameterization, we execute a final Legendre transformation

\[
   \tilde{\psi}_{0} \left( \mathbf{F}, \mathbf{F}_{v}^{i}, \boldsymbol{\mathbb{H}} \right)
   = \psi^{*}_{0} \left( \mathbf{F}, \mathbf{F}_{v}^{i}, \boldsymbol{\mathbb{B}} \right)
   - \boldsymbol{\mathbb{H}} \cdot \boldsymbol{\mathbb{B}} .
\]

        At the same time, we may take advantage of the principle of material frame indifference in order to express the energy density function in terms of symmetric deformation measures:

\[
   \psi_{0} \left( \mathbf{C}, \mathbf{C}_{v}^{i}, \boldsymbol{\mathbb{H}} \right)
   = \tilde{\psi}_{0} \left( \mathbf{F}, \mathbf{F}_{v}^{i}, \boldsymbol{\mathbb{H}} \right) .
\]

        The upshot of these two transformations (leaving out considerable explicit and hidden details) renders the final expression for the reduced dissipation inequality as

\[
   \mathcal{D}_{\text{int}}
   = \mathbf{S}^{\text{tot}} : \frac{1}{2} \dot{\mathbf{C}}
   - \boldsymbol{\mathbb{B}} \cdot \dot{\boldsymbol{\mathbb{H}}}
   - \dot{\psi}_{0} \left( \mathbf{C}, \mathbf{C}_{v}^{i}, \boldsymbol{\mathbb{H}} \right)
   \geq 0 .
\]

        (Notice the sign change on the second term on the right hand side, and the transfer of the time derivative to the magnetic induction vector.) The stress quantity $\mathbf{S}^{\text{tot}}$ is known as the total Piola-Kirchhoff stress tensor and its energy conjugate $\mathbf{C} = \mathbf{F}^{T} \cdot \mathbf{F}$ is the right Cauchy-Green deformation tensor, and $\mathbf{C}_{v}^{i} = \mathbf{C}_{v}^{i} \left( t \right)$ is the re-parameterized internal variable associated with the ith mechanical dissipative (viscous) mechanism.

        Expansion of the material rate of the energy density function, and rearrangement of the various terms, results in the expression

\[
   \mathcal{D}_{\text{int}}
   = \left[ \mathbf{S}^{\text{tot}} - 2 \frac{\partial \psi_{0}}{\partial \mathbf{C}} \right] : \frac{1}{2} \dot{\mathbf{C}}
   - \sum\limits_{i}\left[ 2 \frac{\partial \psi_{0}}{\partial \mathbf{C}_{v}^{i}} \right] : \frac{1}{2} \dot{\mathbf{C}}_{v}^{i}
   + \left[ - \boldsymbol{\mathbb{B}} - \frac{\partial \psi_{0}}{\partial \boldsymbol{\mathbb{H}}} \right] \cdot \dot{\boldsymbol{\mathbb{H}}}
   \geq 0 .
\]

        At this point, it's worth noting the use of the partial derivatives $\partial \left( \bullet \right)$. This is an important detail that will be fundamental to a certain design choice made within the tutorial. As a brief reminder of what this signifies, the partial derivative of a multi-variate function returns the derivative of that function with respect to one of those variables while holding the others constant:

\[
   \frac{\partial f\left(x, y\right)}{\partial x}
   = \frac{d f\left(x, y\right)}{d x} \Big\vert_{y} .
\]

        More specific to what's encoded in the dissipation inequality (with the very general free energy density function $\psi_{0}$ with its parameterization yet to be formalized), if one of the input variables is a function of another, it is also held constant and the chain rule does not propagate any further, while computing the total derivative would imply judicious use of the chain rule. This can be better understood by comparing the following two statements:

\begin{align*}
   \frac{\partial f\left(x, y\left(x\right)\right)}{\partial x}
   &= \frac{d f\left(x, y\left(x\right)\right)}{d x} \Big\vert_{y} \\
   \frac{d f\left(x, y\left(x\right)\right)}{d x}
   &= \frac{d f\left(x, y\left(x\right)\right)}{d x} \Big\vert_{y}
    + \frac{d f\left(x, y\left(x\right)\right)}{d y} \Big\vert_{x} \frac{d y\left(x\right)}{d x} .
\end{align*}
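
        As a concrete illustration (an added example, not part of the original text), take $f(x, y) = x\,y$ with $y(x) = x^{2}$:

\begin{align*}
  \frac{\partial f\left(x, y\right)}{\partial x} \Big\vert_{y} &= y = x^{2} , \\
  \frac{d f\left(x, y\left(x\right)\right)}{d x} &= y + x \frac{d y\left(x\right)}{d x}
  = x^{2} + x \cdot 2 x = 3 x^{2} .
\end{align*}

        The partial derivative sees only the explicit dependence on $x$, while the total derivative also propagates the implicit dependence through $y(x)$.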

        Returning to the thermodynamics of the problem, we next exploit the arbitrariness of the quantities $\dot{\mathbf{C}}$ and $\dot{\boldsymbol{\mathbb{H}}}$, by application of the Coleman-Noll procedure [Coleman1963a], [Coleman1967a]. This leads to the identification of the kinetic conjugate quantities

\[
   \mathbf{S}^{\text{tot}}
   = \mathbf{S}^{\text{tot}} \left( \mathbf{C}, \mathbf{C}_{v}^{i}, \boldsymbol{\mathbb{H}} \right)
   \dealcoloneq 2 \frac{\partial \psi_{0} \left( \mathbf{C}, \mathbf{C}_{v}^{i}, \boldsymbol{\mathbb{H}} \right)}{\partial \mathbf{C}} , \\
   \boldsymbol{\mathbb{B}}
   = \boldsymbol{\mathbb{B}} \left( \mathbf{C}, \mathbf{C}_{v}^{i}, \boldsymbol{\mathbb{H}} \right)
   \dealcoloneq - \frac{\partial \psi_{0} \left( \mathbf{C}, \mathbf{C}_{v}^{i}, \boldsymbol{\mathbb{H}} \right)}{\partial \boldsymbol{\mathbb{H}}} .
\]

/usr/share/doc/packages/dealii/doxygen/deal.II/step_72.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_72.html 2024-11-15 06:44:32.715700926 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_72.html 2024-11-15 06:44:32.715700926 +0000 @@ -154,48 +154,48 @@

        Introduction

        Motivation

        This program solves the same problem as step-15, that is, it solves for the minimal surface equation

\begin{align*}
     F(u) \dealcoloneq -\nabla \cdot \left( \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right) &= 0 \qquad
     \qquad &&\textrm{in} ~ \Omega
     \\
     u&=g \qquad\qquad &&\textrm{on} ~ \partial \Omega.
  \end{align*}

        Among the issues we had identified there (see the Possibilities for extensions section) was that when wanting to use a Newton iteration, we needed to compute the derivative of the residual of the equation with regard to the solution $u$ (here, because the right hand side is zero, the residual is simply the left hand side). For the equation we have here, this is cumbersome but not impossible – but one can easily imagine much more complicated equations where just implementing the residual itself correctly is a challenge, let alone doing so for the derivative necessary to compute the Jacobian matrix. We will address this issue in this program: Using the automatic differentiation techniques discussed in great detail in step-71, we will come up with a way in which we only have to implement the residual and get the Jacobian for free.

        In fact, we can even go one step further. While in step-15 we have just taken the equation as a given, the minimal surface equation is actually the product of minimizing an energy. Specifically, the minimal surface equations are the Euler-Lagrange equations that correspond to minimizing the energy

\[
     E(u) = \int_\Omega \Psi \left( u \right)
  \]

        where the energy density is given by

\[
     \Psi \left( u \right) = \sqrt{1+|\nabla u|^{2}}.
  \]

        This is the same as saying that we seek to find the stationary point of the variation of the energy functional

\[
     \min\limits_{u} E \left( u \right)
       \quad \rightarrow \quad
       \delta E \left( u, \varphi \right) \dealcoloneq
       \left(\varphi, F(u)\right) = 0
       \qquad
       \forall \varphi,
  \]

        as this is where the equilibrium solution to the boundary value problem lies.

        The key point then is that, maybe, we don't even need to implement the residual, but that implementing the simpler energy density $\Psi(u)$ might actually be enough.

        Our goal then is this: When using a Newton iteration, we need to repeatedly solve the linear partial differential equation

\begin{align*}
     F'(u^{n},\delta u^{n}) &=- F(u^{n})
  \end{align*}

        so that we can compute the update

\begin{align*}
     u^{n+1}&=u^{n}+\alpha^n \delta u^{n}
  \end{align*}

        with the solution $\delta u^{n}$ of the Newton step. As discussed in step-15, we can compute the derivative $F'(u,\delta u)$ by hand and obtain

\[
   F'(u,\delta u)
   =
   - \nabla \cdot \left( \frac{1}{\left(1+|\nabla u|^{2}\right)^{\frac{1}{2}}}\nabla
@@ -203,76 +203,76 @@
   \nabla \cdot \left( \frac{\nabla u \cdot
   \nabla \delta u}{\left(1+|\nabla u|^{2}\right)^{\frac{3}{2}}} \nabla u
   \right).
  \]

        So here then is what this program is about: It is about techniques that can help us with computing $F'(u,\delta u)$ without having to implement it explicitly, either by providing an implementation of $F(u)$ or an implementation of $E(u)$. More precisely, we will implement three different approaches and compare them in terms of run-time but also – maybe more importantly – how much human effort it takes to implement them:

          • The method used in step-15 to form the Jacobian matrix.
          • Computing the Jacobian matrix from an implementation of the residual $F(u)$, using automatic differentiation.
          • Computing both the residual and Jacobian matrix from an implementation of the energy functional $E(u)$, also using automatic differentiation.

          For the first of these methods, there are no conceptual changes compared to step-15.

          Computing the Jacobian from the residual

          For the second method, let us outline how we will approach the issue using automatic differentiation to compute the linearization of the residual vector. To this end, let us change notation for a moment and denote by $F(U)$ not the residual of the differential equation, but in fact the residual vector – i.e., the discrete residual. We do so because that is what we actually do when we discretize the problem on a given mesh: We solve the problem $F(U)=0$ where $U$ is the vector of unknowns.

          More precisely, the $i$th component of the residual is given by

\[
   F(U)_i \dealcoloneq
   \int\limits_{\Omega}\nabla \varphi_i \cdot \left[ \frac{1}{\sqrt{1+|\nabla
   u|^{2}}} \nabla u \right] \, dV ,
\]

          where $u(\mathbf x)=\sum_j U_j \varphi_j(\mathbf x)$. Given this, the contribution for cell $K$ is

\[
   F(U)_i^K \dealcoloneq
   \int\limits_K\nabla \varphi_i \cdot \left[ \frac{1}{\sqrt{1+|\nabla
   u|^{2}}} \nabla u \right] \, dV ,
\]

          Its first-order Taylor expansion is given as

\[
   F(U + \delta U)_i^K
   \approx F(U)_i^K
   + \sum_{j}^{n_{\textrm{dofs}}} \left[ \frac{\partial F(U)_i^K}{\partial
   U_j} \delta U_j \right],
\]

          and consequently we can compute the contribution of cell $K$ to the Jacobian matrix $J$ as $J(U)_{ij}^K = \frac{\partial F(U)_i^K}{\partial U_j}$. The important point here is that on cell $K$, we can express

\[
   F(U)_i^K \dealcoloneq
   \int\limits_K\nabla \varphi_i \cdot \left[ \frac{1}{\sqrt{1+\left|
   \sum_{j'}^{n_\textrm{dofs}} U_{j'} \nabla \varphi_{j'}\right|^{2}}}
   \left(\sum_{j''}^{n_\textrm{dofs}} U_{j''} \nabla \varphi_{j''}\right)\right] \, dV.
\]

          For clarity, we have used $j'$ and $j''$ as counting indices to make clear that they are distinct from each other and from $j$ above. Because in this formula, $F(U)$ only depends on the coefficients $U_j$, we can compute the derivative $J(U)_{ij}^K$ as a matrix via automatic differentiation of $F(U)_i^K$. By the same argument as we always use, it is clear that $F(U)^K$ does not actually depend on all unknowns $U_j$, but only on those unknowns for which $j$ is a shape function that lives on cell $K$, and so in practice, we restrict $F(U)^K$ and $J(U)^K$ to that part of the vector and matrix that corresponds to the local DoF indices, and then distribute from the local cell $K$ to the global objects.

          Using all of these realizations, the approach will then be to implement $F(U)^K$ in the program and let the automatic differentiation machinery compute the derivatives $J(U)^K$ from that.

          Computing the Jacobian and the residual from the energy functional

          For the final implementation of the assembly process, we will move a level higher than the residual: our entire linear system will be determined directly from the energy functional that governs the physics of this boundary value problem. We can take advantage of the fact that we can calculate the total energy in the domain directly from the local contributions, i.e.,

\[
   E \left( U \right) \dealcoloneq \int\limits_{\Omega} \Psi \left( u
   \right) \, dV .
\]

          In the discrete setting, this means that on each finite element we have

\[
    E \left( U \right)^K
     \dealcoloneq \int\limits_{K} \Psi \left( u \right) \, dV
     \approx \sum\limits_{q}^{n_{\textrm{q-points}}} \Psi \left( u \left(
     \mathbf{x}_{q} \right) \right) \underbrace{\vert J_{q} \vert \times W_{q}}_{\text{JxW(q)}} .
\]

          If we implement the cell energy, which depends on the field solution, we can compute its first (discrete) variation

\[
   F(U)^K_i
     = \frac{\partial E(U)^K}{\partial U_i}
\]

          and, thereafter, its second (discrete) variation

\[
   J(U)^K_{ij}
     = \frac{\partial^{2}  E(U)^K}{\partial U_i \partial U_j}.
\]

          So, from the cell contribution to the total energy function, we may expect to have the approximate residual and tangent contributions generated for us as long as we can provide an implementation of the local energy $E(U)^K$. Again, due to the design of the automatic differentiation variables used in this tutorial, in practice these approximations for the contributions to the residual vector and tangent matrix are actually accurate to machine precision.
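
          In terms of the AD helpers used later in this program, the energy-based assembly might be sketched as follows. This is a condensed sketch with assumed names (in particular, psi_q for the energy density evaluated at quadrature point q); the commented program below spells out the real details:

            // Derive the cell residual and tangent from the scalar cell
            // energy E(U)^K via automatic differentiation.
            using ADHelper = Differentiation::AD::EnergyFunctional<
              Differentiation::AD::NumberTypes::sacado_dfad_dfad,
              double>;
            using ADNumberType = typename ADHelper::ad_type;

            ADHelper ad_helper(local_dof_indices.size());
            ad_helper.register_dof_values(current_solution, local_dof_indices);

            ADNumberType energy_ad = ADNumberType(0.0);
            for (const unsigned int q : fe_values.quadrature_point_indices())
              energy_ad += psi_q[q] * fe_values.JxW(q); // E(U)^K = sum_q Psi(u(x_q)) JxW(q)

            ad_helper.register_energy_functional(energy_ad);
            ad_helper.compute_residual(cell_rhs);         // F(U)^K_i  = dE/dU_i
            ad_helper.compute_linearization(cell_matrix); // J(U)^K_ij = d2E/dU_i dU_j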

          The commented program

          The majority of this tutorial is an exact replica of step-15. So, in the interest of brevity and maintaining a focus on the changes implemented here, we will only document what's new and simply indicate which sections of code are a repetition of what has come before.

          Include files


        Assembly via differentiation of the residual vector

        As outlined in the introduction, what we need to do for this second approach is implement the local contributions $F(U)^K$ from cell $K$ to the residual vector, and then let the AD machinery deal with how to compute the derivatives $J(U)_{ij}^K=\frac{\partial F(U)^K_i}{\partial U_j}$ from it.

        For the following, recall that

        \[
    F(U)_i^K \dealcoloneq
    \int\limits_K\nabla \varphi_i \cdot \left[ \frac{1}{\sqrt{1+|\nabla
    u|^{2}}} \nabla u \right] \, dV ,
        \]

        where $u(\mathbf x)=\sum_j U_j \varphi_j(\mathbf x)$.

        Let us see how this is implemented in practice:

          template <int dim>
          void MinimalSurfaceProblem<dim>::assemble_system_with_residual_linearization()
        @@ -641,12 +641,12 @@
          copy_data.local_dof_indices[0];
          cell->get_dof_indices(local_dof_indices);
         
        We'll now create and initialize an instance of the AD helper class. To do this, we need to specify how many independent variables and dependent variables there are. The independent variables will be the number of local degrees of freedom that our solution vector has, i.e., the number $j$ in the per-element representation of the discretized solution vector $u (\mathbf{x})|_K = \sum\limits_{j} U^K_j \varphi_j(\mathbf{x})$ that indicates how many solution coefficients are associated with each finite element. In deal.II, this equals FiniteElement::dofs_per_cell. The number of dependent variables will be the number of entries in the local residual vector that we will be forming. In this particular problem (like many others that employ the standard Galerkin method) the number of local solution coefficients matches the number of local residual equations.

          const unsigned int n_independent_variables = local_dof_indices.size();
          const unsigned int n_dependent_variables = dofs_per_cell;
          ADHelper ad_helper(n_independent_variables, n_dependent_variables);
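        For orientation, here is a hedged sketch of the steps that typically follow this initialization, modeled on the Differentiation::AD::ResidualLinearization helper and on the $F(U)^K_i$ formula given above (u_fe denotes a hypothetical FEValuesExtractors::Scalar for the solution field; this is an illustration, not a verbatim excerpt):

          using ADNumberType = typename ADHelper::ad_type;

          // Mark the local solution coefficients as the independent variables.
          ad_helper.register_dof_values(current_solution, local_dof_indices);
          const std::vector<ADNumberType> &dof_values_ad =
            ad_helper.get_sensitive_dof_values();

          // Evaluate AD-sensitive solution gradients at the quadrature points.
          std::vector<Tensor<1, dim, ADNumberType>> solution_gradients(
            fe_values.n_quadrature_points);
          fe_values[u_fe].get_function_gradients_from_local_dof_values(
            dof_values_ad, solution_gradients);

          // Assemble the residual F(U)^K in AD numbers ...
          std::vector<ADNumberType> residual_ad(n_dependent_variables,
                                                ADNumberType(0.0));
          for (const unsigned int q : fe_values.quadrature_point_indices())
            {
              const ADNumberType coeff =
                1.0 /
                std::sqrt(1.0 + solution_gradients[q] * solution_gradients[q]);
              for (const unsigned int i : fe_values.dof_indices())
                residual_ad[i] += (fe_values.shape_grad(i, q) * coeff *
                                   solution_gradients[q]) *
                                  fe_values.JxW(q);
            }

          // ... and let the helper produce the residual and its linearization.
          ad_helper.register_residual_vector(residual_ad);
          ad_helper.compute_residual(cell_rhs);
          cell_rhs *= -1.0;
          ad_helper.compute_linearization(cell_matrix);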
         
        /usr/share/doc/packages/dealii/doxygen/deal.II/step_74.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_74.html 2024-11-15 06:44:32.783701534 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_74.html 2024-11-15 06:44:32.783701534 +0000 @@ -157,28 +157,28 @@

        The equation

        In this example, we consider Poisson's equation

        \[
 - \nabla \cdot \left( \nu  \nabla u\right) = f  \qquad   \mbox{in } \Omega,
        \]

        subject to the boundary condition

        \[
 u = g_D \qquad \mbox{on } \partial \Omega.
        \]

        For simplicity, we assume that the diffusion coefficient $\nu$ is constant here. Note that if $\nu$ is discontinuous, we need to take this into account when computing jump terms on cell faces.

        We denote the mesh by ${\mathbb T}_h$, and $K\in{\mathbb T}_h$ is a mesh cell. The sets of interior and boundary faces are denoted by ${\mathbb F}^i_h$ and ${\mathbb F}^b_h$ respectively. Let $K^0$ and $K^1$ be the two cells sharing a face $f\in {\mathbb F}^i_h$, and $\mathbf n$ be the outer normal vector of $K^0$. Then the jump operator is given by the "here minus there" formula,

        \[
 \jump{v} = v^0 - v^1
        \]

        and the averaging operator as

        \[
 \average{v} = \frac{v^0 + v^1}{2}
        \]

        respectively. Note that when $f\subset \partial \Omega$, we define $\jump{v} = v$ and $\average{v}=v$. The discretization using the SIPG is given by the following weak formula (more details can be found in [di2011mathematical] and the references therein)

        \begin{align*}
 &\sum_{K\in {\mathbb T}_h} (\nabla v_h, \nu \nabla u_h)_K\\
 &-\sum_{F \in F_h^i} \left\{
     \left< \jump{v_h}, \nu\average{ \nabla u_h} \cdot  \mathbf n \right>_F
@@ -194,52 +194,52 @@
   - \sum_{F \in F_h^b} \left\{
     \left< \nabla v_h \cdot \mathbf n, \nu g_D\right>_F - \left<v_h,\nu \sigma g_D\right>_F
   \right\}.
\end{align*}

        The penalty parameter

        The penalty parameter is defined as $\sigma = \gamma/h_f$, where $h_f$ is a local length scale associated with the cell face; here we choose an approximation of the length of the cell in the direction normal to the face: $\frac 1{h_f} = \frac 12 \left(\frac 1{h_K} + \frac 1{h_{K'}}\right)$, where $K,K'$ are the two cells adjacent to the face $f$ and we compute $h_K = \frac{|K|}{|f|}$.

        In the formula above, $\gamma$ is the penalization constant. To ensure the discrete coercivity, the penalization constant has to be large enough [ainsworth2007posteriori]. People do not really have consensus on which of the formulas proposed in the literature should be used. (This is similar to the situation discussed in the "Results" section of step-47.) One can just pick a large constant, while other options could be the multiples of $(p+1)^2$ or $p(p+1)$. In this code, we follow step-39 and use $\gamma = p(p+1)$.

        A posteriori error estimator

        In this example, with a slight modification, we use the error estimator by Karakashian and Pascal [karakashian2003posteriori]

        \[
 \eta^2 = \sum_{K \in {\mathbb T}_h} \eta^2_{K} +  \sum_{f_i \in {\mathbb F}^i_h}  \eta^2_{f_i} + \sum_{f_b \in {\mathbb F}^b_h}\eta^2_{f_b}
        \]

        where

        \begin{align*}
 \eta^2_{K} &= h_K^2 \left\| f + \nu \Delta u_h \right\|_K^2,
 \\
 \eta^2_{f_i} &= \sigma \left\| \jump{u_h}  \right\|_f^2   +  h_f \left\|  \jump{\nu \nabla u_h} \cdot \mathbf n   \right\|_f^2,
 \\
 \eta_{f_b}^2 &=  \sigma \left\| u_h-g_D \right\|_f^2.
        \end{align*}

        Here we use $\sigma = \gamma/h_f$ instead of $\gamma^2/h_f$ for the jump terms of $u_h$ (the first term in $\eta^2_{f_i}$ and $\eta_{f_b}^2$).

        In order to compute this estimator, in each cell $K$ we compute

        \begin{align*}
 \eta_{c}^2 &= h_K^2 \left\| f + \nu \Delta u_h \right\|_K^2,
 \\
 \eta_{f}^2 &= \sum_{f\in \partial K}\lbrace \sigma \left\| \jump{u_h}  \right\|_f^2   +  h_f \left\|  \jump{\nu \nabla u_h} \cdot \mathbf n  \right\|_f^2 \rbrace,
 \\
 \eta_{b}^2 &= \sum_{f\in \partial K \cap \partial \Omega}  \sigma \left\| (u_h -g_D)  \right\|_f^2.
        \end{align*}

        Then the square of the error estimate per cell is

        \[
 \eta_\text{local}^2 =\eta_{c}^2+0.5\eta_{f}^2+\eta_{b}^2.
        \]

        The factor of $0.5$ results from the fact that the overall error estimator includes each interior face only once, and so the estimators per cell count it with a factor of one half for each of the two adjacent cells. Note that we compute $\eta_\text{local}^2$ instead of $\eta_\text{local}$ to simplify the implementation. The error estimate square per cell is then stored in a global vector, whose $l_1$ norm is equal to $\eta^2$.
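        Indeed, each interior face $f$ contributes its $\eta^2_{f_i}$ once to $\eta_f^2$ on each of its two adjacent cells, so summing the per-cell quantities with the factor $0.5$ recovers the global estimator exactly:

        \[
 \sum_{K \in {\mathbb T}_h} \eta_\text{local}^2
 = \sum_{K \in {\mathbb T}_h} \eta_{c}^2
 + \sum_{f_i \in {\mathbb F}^i_h} \eta^2_{f_i}
 + \sum_{f_b \in {\mathbb F}^b_h} \eta^2_{f_b}
 = \eta^2 .
        \]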

        The test case

        In the first test problem, we run a convergence test using a smooth manufactured solution with $\nu =1$ in 2D

        \begin{align*}
 u&=\sin(2\pi x)\sin(2\pi y), &\qquad\qquad &(x,y)\in\Omega=(0,1)\times (0,1),
 \\
 u&=0,                        &\qquad\qquad &\text{on } \partial \Omega,
        \end{align*}

        and $f= 8\pi^2 u$. We compute errors against the manufactured solution and evaluate the convergence rate.

        In the second test, we choose Functions::LSingularityFunction on an L-shaped domain (GridGenerator::hyper_L) in 2D. The solution is given in polar coordinates by $u(r,\phi) = r^{\frac{2}{3}}\sin \left(\frac{2}{3}\phi \right)$, which has a singularity at the origin. An error estimator is constructed to detect the region with large errors, according to which the mesh is refined adaptively.

        The commented program

        The first few files have already been covered in previous examples and will thus not be further commented on:

          #include <deal.II/base/quadrature_lib.h>
        @@ -404,7 +404,7 @@
         

        Auxiliary functions

        This function computes the penalty $\sigma$.

          double get_penalty_factor(const unsigned int fe_degree,
          const double cell_extent_left,
          const double cell_extent_right)
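        The function body itself is cut off by the diff above. From the definitions in the introduction ($\gamma = p(p+1)$ and $\frac 1{h_f} = \frac 12 \left(\frac 1{h_K} + \frac 1{h_{K'}}\right)$), a plausible reconstruction (a sketch, not a verbatim excerpt) is:

          // Hedged reconstruction: sigma = p(p+1) * (1/h_K + 1/h_K') / 2,
          // guarding against a polynomial degree of zero.
          double get_penalty_factor(const unsigned int fe_degree,
                                    const double cell_extent_left,
                                    const double cell_extent_right)
          {
            const unsigned int degree = std::max(1U, fe_degree);
            return degree * (degree + 1.) * 0.5 *
                   (1. / cell_extent_left + 1. / cell_extent_right);
          }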
        @@ -503,7 +503,7 @@

        The remainder of the class's members are used for the following:

        • Vectors to store error estimator square and energy norm square per cell.
        • Print convergence rate and errors on the screen.
        • The diffusion coefficient $\nu$ is set to 1.
        • Members that store information about the test case to be computed.
          Vector<double> estimated_error_square_per_cell;
        @@ -820,7 +820,7 @@
        void initialize(const SparsityPattern &sparsity_pattern)
        std::string int_to_string(const unsigned int value, const unsigned int digits=numbers::invalid_unsigned_int)
        Definition utilities.cc:470

        The compute_error_estimate() function

        The assembly of the error estimator here is quite similar to that of the global matrix and right-hand side and can be handled by the MeshWorker::mesh_loop() framework. To understand what each of the local (lambda) functions is doing, recall first that the local cell residual is defined as $h_K^2 \left\| f + \nu \Delta u_h \right\|_K^2$:

          template <int dim>
          void SIPGLaplace<dim>::compute_error_estimate()
          {
        @@ -855,8 +855,8 @@
          };
         
        DEAL_II_HOST constexpr Number trace(const SymmetricTensor< 2, dim2, Number > &)
        Next compute boundary terms $\sum_{f\in \partial K \cap \partial \Omega} \sigma \left\| [  u_h-g_D ]  \right\|_f^2$:

          const auto boundary_worker =
          [&](const typename DoFHandler<dim>::active_cell_iterator &cell,
          const unsigned int &face_no,
        @@ -887,9 +887,9 @@
          copy_data.value += penalty * difference_norm_square;
          };
         
        And finally interior face terms $\sum_{f\in \partial K}\lbrace \sigma \left\| [u_h]  \right\|_f^2   +  h_f \left\|  [\nu \nabla u_h \cdot \mathbf n ] \right\|_f^2 \rbrace$:

          const auto face_worker =
          [&](const typename DoFHandler<dim>::cell_iterator &cell,
          const unsigned int &f,
        @@ -981,25 +981,25 @@
        @ update_hessians
        Second derivatives of shape functions.

        The compute_energy_norm_error() function

        Next, we evaluate the accuracy in terms of the energy norm. This function is similar to the assembling of the error estimator above. Here we compute the square of the energy norm defined by

        \[
   \|u \|_{1,h}^2 = \sum_{K \in \Gamma_h} \nu\|\nabla u \|_K^2 +
   \sum_{f \in F_i} \sigma \| [ u ] \|_f^2 +
   \sum_{f \in F_b} \sigma  \|u\|_f^2.
        \]

        Therefore the corresponding error is

        \[
   \|u -u_h \|_{1,h}^2 = \sum_{K \in \Gamma_h} \nu\|\nabla (u_h - u)  \|_K^2
   + \sum_{f \in F_i} \sigma  \|[ u_h ] \|_f^2 + \sum_{f \in F_b}\sigma
   \|u_h-g_D\|_f^2.
        \]

          template <int dim>
          double SIPGLaplace<dim>::compute_energy_norm_error()
          {
          energy_norm_square_per_cell.reinit(triangulation.n_active_cells());
         
        Assemble $\sum_{K \in \Gamma_h} \nu\|\nabla (u_h - u)  \|_K^2 $.

          const auto cell_worker =
          [&](const typename DoFHandler<dim>::active_cell_iterator &cell,
          ScratchData &scratch_data,
        @@ -1027,7 +1027,7 @@
          copy_data.value = diffusion_coefficient * norm_square;
          };
         

        Assemble $\sum_{f \in F_b}\sigma  \|u_h-g_D\|_f^2$.

        /usr/share/doc/packages/dealii/doxygen/deal.II/step_75.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_75.html 2024-11-15 06:44:32.867702284 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_75.html 2024-11-15 06:44:32.867702284 +0000 @@ -175,65 +175,65 @@

        hp-decision indicators

        With hp-adaptive methods, we not only have to decide which cells we want to refine or coarsen, but we also have the choice how we want to do that: either by adjusting the grid resolution or the polynomial degree of the finite element.

        We will again base the decision on which cells to adapt on (a posteriori) computed error estimates of the current solution, e.g., using the KellyErrorEstimator. We will similarly decide how to adapt with (a posteriori) computed smoothness estimates: large polynomial degrees work best on smooth parts of the solution while fine grid resolutions are favorable on irregular parts. In step-27, we presented a way to calculate smoothness estimates based on the decay of Fourier coefficients. Let us take here the opportunity and present an alternative that follows the same idea, but with Legendre coefficients.

        We will briefly present the idea of this new technique, but limit its description to 1D for simplicity. Suppose $u_\text{hp}(x)$ is a finite element function defined on a cell $K$ as

        \[
 u_\text{hp}(x) = \sum c_i \varphi_i(x)
        \]

        where each $\varphi_i(x)$ is a shape function. We can equivalently represent $u_\text{hp}(x)$ in the basis of Legendre polynomials $P_k$ as

        \[
 u_\text{hp}(x) = \sum l_k P_k(x).
        \]

        Our goal is to obtain a mapping between the finite element coefficients $c_i$ and the Legendre coefficients $l_k$. We will accomplish this by writing the problem as an $L^2$-projection of $u_\text{hp}(x)$ onto the Legendre basis. Each coefficient $l_k$ can be calculated via

        \[
 l_k = \int_K u_\text{hp}(x) P_k(x) dx.
        \]

        By construction, the Legendre polynomials are orthogonal under the $L^2$-inner product on $K$. Additionally, we assume that they have been normalized, so their inner products can be written as

        \[
 \int_K P_i(x) P_j(x) dx = \det(J_K) \, \delta_{ij}
        \]

        where $\delta_{ij}$ is the Kronecker delta, and $J_K$ is the Jacobian of the mapping from $\hat{K}$ to $K$, which (in this tutorial) is assumed to be constant (i.e., the mapping must be affine).

        Hence, combining all these assumptions, the projection matrix for expressing $u_\text{hp}(x)$ in the Legendre basis is just $\det(J_K) \, \mathbb{I}$ – that is, $\det(J_K)$ times the identity matrix. Let $F_K$ be the mapping from the reference cell $\hat{K}$ to $K$. The entries in the right-hand side in the projection system are, therefore,

        \[
 \int_K u_\text{hp}(x) P_k(x) dx
 = \det(J_K) \int_{\hat{K}} u_\text{hp}(F_K(\hat{x})) P_k(F_K(\hat{x})) d\hat{x}.
        \]

        Recalling the shape function representation of $u_\text{hp}(x)$, we can write this as $\det(J_K) \, \mathbf{C} \, \mathbf{c}$, where $\mathbf{C}$ is the change-of-basis matrix with entries

        \[
 \int_K P_i(x) \varphi_j(x) dx
 = \det(J_K) \int_{\hat{K}} P_i(F_K(\hat{x})) \varphi_j(F_K(\hat{x})) d\hat{x}
 = \det(J_K) \int_{\hat{K}} \hat{P}_i(\hat{x}) \hat{\varphi}_j(\hat{x}) d\hat{x}
 \dealcoloneq \det(J_K) \, C_{ij}
        \]

        so the values of $\mathbf{C}$ can be written independently of $K$ by factoring $\det(J_K)$ out front after transforming to reference coordinates. Hence, putting it all together, the projection problem can be written as

        \[
 \det(J_K) \, \mathbb{I} \, \mathbf{l} = \det(J_K) \, \mathbf{C} \, \mathbf{c}
        \]

        which can be rewritten as simply

        \[
 \mathbf{l} = \mathbf{C} \, \mathbf{c}.
        \]

        At this point, we need to emphasize that most finite element applications use unstructured meshes for which mapping is almost always non-affine. Put another way: the assumption that $J_K$ is constant across the cell is not true for general meshes. Hence, a correct calculation of $l_k$ requires not only that we calculate the corresponding transformation matrix $\mathbf{C}$ for every single cell, but that we also define a set of Legendre-like orthogonal functions on a cell $K$ which may have an arbitrary and very complex geometry. The second part, in particular, is very computationally expensive. The current implementation of the FESeries transformation classes relies on the simplification resulting from having a constant Jacobian to increase performance and thus only yields correct results for affine mappings. The transformation is only used for the purpose of smoothness estimation to decide on the type of adaptation, which is not a critical component of a finite element program. Apart from that, this circumstance does not pose a problem for this tutorial as we only use square-shaped cells.

        Eibner and Melenk [eibner2007hp] argued that a function is analytic, i.e., representable by a power series, if and only if the absolute values of the Legendre coefficients decay exponentially with increasing index $k$:

        \[
 \exists C,\sigma > 0 : \quad \forall k \in \mathbb{N}_0 : \quad |l_k|
 \leq C \exp\left( - \sigma k \right) .
        \]

        The rate of decay $\sigma$ can be interpreted as a measure for the smoothness of that function. We can get it as the slope of a linear regression fit of the transformation coefficients:

        \[
 \ln(|l_k|) \sim \ln(C) - \sigma k .
        \]
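        As a concrete illustration, the decay rate is just the (negated) slope of an ordinary least-squares fit through the points $(k, \ln|l_k|)$. A self-contained sketch, assuming a hypothetical vector abs_coefficients holding $|l_k|$ for $k = 0,\dots,n-1$:

          #include <cmath>
          #include <vector>

          // Least-squares fit of ln|l_k| ~ ln(C) - sigma * k; returns sigma.
          double estimate_decay_rate(const std::vector<double> &abs_coefficients)
          {
            const double n = abs_coefficients.size();
            double sum_k = 0., sum_y = 0., sum_kk = 0., sum_ky = 0.;
            for (unsigned int k = 0; k < abs_coefficients.size(); ++k)
              {
                const double y = std::log(abs_coefficients[k]);
                sum_k += k;
                sum_y += y;
                sum_kk += static_cast<double>(k) * k;
                sum_ky += k * y;
              }
            const double slope =
              (n * sum_ky - sum_k * sum_y) / (n * sum_kk - sum_k * sum_k);
            return -slope; // the decay rate sigma is the negative slope
          }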

        We will perform this fit on each cell $K$ to get a local estimate for the smoothness of the finite element approximation. The decay rate $\sigma_K$ then acts as the decision indicator for hp-adaptation. For a finite element on a cell $K$ with a polynomial degree $p$, calculating the coefficients for $k \leq (p+1)$ proved to be a reasonable choice to estimate smoothness. You can find a more detailed and dimension independent description in [fehling2020].

        All of the above is already implemented in the FESeries::Legendre class and the SmoothnessEstimator::Legendre namespace. With the error estimates and smoothness indicators, we are then left to flag the cells for actual refinement and coarsening. Some functions from the parallel::distributed::GridRefinement and hp::Refinement namespaces will help us with that later.
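        A hedged usage sketch of these classes, relying on the default_fe_series() helper whose signature appears further down this page (fe_collection, dof_handler, triangulation, and locally_relevant_solution are assumed to exist in the surrounding program):

          // Per-cell smoothness from the decay of Legendre coefficients.
          FESeries::Legendre<dim> fe_legendre =
            SmoothnessEstimator::Legendre::default_fe_series(fe_collection);
          Vector<float> smoothness_indicators(triangulation.n_active_cells());
          SmoothnessEstimator::Legendre::coefficient_decay(fe_legendre,
                                                           dof_handler,
                                                           locally_relevant_solution,
                                                           smoothness_indicators);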

        Hybrid geometric multigrid

        Finite element matrices are typically very sparse. Additionally, hp-adaptive methods correspond to matrices with highly variable numbers of nonzero entries per row. Some state-of-the-art preconditioners, like the algebraic multigrid (AMG) ones as used in step-40, behave poorly in these circumstances.

        @@ -242,18 +242,18 @@

        The test case

        For elliptic equations, each reentrant corner typically invokes a singularity [brenner2008]. We can use this circumstance to put our hp-decision algorithms to a test: on all cells to be adapted, we would prefer a fine grid near the singularity, and a high polynomial degree otherwise.

        As the simplest elliptic problem to solve under these conditions, we chose the Laplace equation in a L-shaped domain with the reentrant corner in the origin of the coordinate system.

        To be able to determine the actual error, we manufacture a boundary value problem with a known solution. On the above mentioned domain, one solution to the Laplace equation is, in polar coordinates $(r, \varphi)$:

        \[
 u_\text{sol} = r^{2/3} \sin(2/3 \varphi).
        \]

        See also [brenner2008] or [mitchell2014hp]. The solution looks as follows:

        Analytic solution.

        The singularity becomes obvious by investigating the solution's gradient in the vicinity of the reentrant corner, i.e., the origin

        \[
 \left\| \nabla u_\text{sol} \right\|_{2} = 2/3 r^{-1/3} , \quad
 \lim\limits_{r \rightarrow 0} \left\| \nabla u_\text{sol} \right\|_{2} =
 \infty .
        \]

        As we know where the singularity will be located, we expect that our hp-decision algorithm decides for a fine grid resolution in this particular region, and high polynomial degree anywhere else.

        So let's see if that is actually the case, and how hp-adaptation performs compared to pure h-adaptation. But first let us have a detailed look at the actual code.

        @@ -420,7 +420,7 @@
         

        Matrix-free Laplace operator

        This is a matrix-free implementation of the Laplace operator that will basically take over the part of the assemble_system() function from other tutorials. The meaning of all member functions will be explained at their definition later.

        We will use the FEEvaluation class to evaluate the solution vector at the quadrature points and to perform the integration. In contrast to other tutorials, the template argument degree is set to $-1$ and the number of quadrature points in 1d to $0$. In this case, FEEvaluation dynamically selects the correct polynomial degree and number of quadrature points. Here, we introduce an alias to FEEvaluation with the correct template parameters so that we do not have to worry about them later on.
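        Such an alias could look like the following sketch (assuming a scalar problem with one solution component; the name FECellIntegrator is illustrative):

          // degree = -1 and n_q_points_1d = 0 defer both choices to run time.
          using FECellIntegrator = FEEvaluation<dim, -1, 0, 1, number>;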

          template <int dim, typename number>
          class LaplaceOperator : public Subscriptor
          {
        @@ -1139,8 +1139,8 @@
         
        FESeries::Legendre< dim, spacedim > default_fe_series(const hp::FECollection< dim, spacedim > &fe_collection, const unsigned int component=numbers::invalid_unsigned_int)

        The next part is going to be tricky. During execution of refinement, a few hp-algorithms need to interfere with the actual refinement process on the Triangulation object. We do this by connecting several functions to Triangulation::Signals: signals will be called at different stages during the actual refinement process and trigger all connected functions. We require this functionality for load balancing and to limit the polynomial degrees of neighboring cells.

        For the former, we would like to assign a weight to every cell that is proportional to the number of degrees of freedom of its future finite element. The library offers a class parallel::CellWeights that allows one to easily attach individual weights at the right place during the refinement process, i.e., after all refine and coarsen flags have been set correctly for hp-adaptation and right before repartitioning for load balancing is about to happen. Functions can be registered that will attach weights of the form $a (n_\text{dofs})^b$ with a provided pair of parameters $(a,b)$. We register such a function in the following.

        For load balancing, efficient solvers like the one we use should scale linearly with the number of degrees of freedom owned. We set the parameters for cell weighting correspondingly: A weighting factor of $1$ and an exponent of $1$ (see the definitions of the weighting_factor and weighting_exponent above).

          cell_weights = std::make_unique<parallel::CellWeights<dim>>(
          dof_handler,
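        The diff truncates this call after dof_handler; a hedged sketch of how it is typically completed, using the ndofs_weighting() helper of parallel::CellWeights to realize the $a (n_\text{dofs})^b$ form with $(a,b)=(1,1)$:

          // Hedged completion sketch, not a verbatim excerpt: attach weights
          // of the form a * (n_dofs)^b to every cell.
          cell_weights = std::make_unique<parallel::CellWeights<dim>>(
            dof_handler,
            parallel::CellWeights<dim>::ndofs_weighting(
              {weighting_factor, weighting_exponent})); // (a, b) = (1, 1)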
        @@ -1668,7 +1668,7 @@

        The deal.II library offers multiple strategies to decide which type of adaptation to impose on cells: either adjust the grid resolution or change the polynomial degree. We only presented the Legendre coefficient decay strategy in this tutorial, while step-27 demonstrated the Fourier equivalent of the same idea.

        See the "possibilities for extensions" section of step-27 for an overview over these strategies, or the corresponding documentation for a detailed description.

        There, another strategy is mentioned that has not been shown in any tutorial so far: the strategy based on refinement history. The usage of this method for parallel distributed applications is more tricky than the others, so we will highlight the challenges that come along with it. We need information about the final state of refinement flags, and we need to transfer the solution across refined meshes. For the former, we need to attach the hp::Refinement::predict_error() function to the Triangulation::Signals::post_p4est_refinement signal in a way that it will be called after the hp::Refinement::limit_p_level_difference() function. At this stage, all refinement flags and future FE indices are terminally set and a reliable prediction of the error is possible. The predicted error then needs to be transferred across refined meshes with the aid of parallel::distributed::CellDataTransfer.

        Try implementing one of these strategies in this tutorial and observe the subtle changes to the results. You will notice that all strategies are capable of identifying the singularities near the reentrant corners and will perform $h$-refinement in these regions, while preferring $p$-refinement in the bulk domain. A detailed comparison of these strategies is presented in [fehling2020].

        Solve with matrix-based methods

        This tutorial focuses solely on matrix-free strategies. All hp-adaptive algorithms however also work with matrix-based approaches in the parallel distributed context.

        To create a system matrix, you can either use the LaplaceOperator::get_system_matrix() function, or use an assemble_system() function similar to the one of step-27. You can then pass the system matrix to the solver as usual.

        /usr/share/doc/packages/dealii/doxygen/deal.II/step_76.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_76.html 2024-11-15 06:44:32.967703177 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_76.html 2024-11-15 06:44:32.967703177 +0000 @@ -377,7 +377,7 @@
        dst,
        src);

        VectorizedArrayType

        The class VectorizedArray<Number> is a key component to achieve the high node-level performance of the matrix-free algorithms in deal.II. It is a wrapper class around a short vector of $n$ entries of type Number and maps arithmetic operations to appropriate single-instruction/multiple-data (SIMD) concepts by intrinsic functions. The length of the vector can be queried by VectorizedArray::size() and its underlying number type by VectorizedArray::value_type.
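        A small usage sketch of these queries (assumes C++17 for std::is_same_v and the <type_traits> header):

          using VA = VectorizedArray<double>;         // width fixed when deal.II was built
          constexpr std::size_t n_lanes = VA::size(); // number of SIMD lanes
          static_assert(std::is_same_v<VA::value_type, double>, "scalar type");

          VA x = 1.0;      // broadcasts 1.0 to every lane
          VA y = x * 2.0;  // a single SIMD multiply across all lanes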

        In the default case (VectorizedArray<Number>), the vector length is set at compile time of the library to match the highest value supported by the given processor architecture. However, a second optional template argument can also be specified as VectorizedArray<Number, size>, where size explicitly controls the vector length within the capabilities of a particular instruction set. A full list of supported vector lengths is presented in the following table:

        /usr/share/doc/packages/dealii/doxygen/deal.II/step_77.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_77.html 2024-11-15 06:44:33.027703713 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_77.html 2024-11-15 06:44:33.027703713 +0000 @@ -197,15 +197,15 @@ F(U) = 0 \end{align*}

          and constructs a sequence of iterates $U_k$ which, in general, are vectors of the same length as the vector returned by the function $F$. To do this, there are a few things it needs from the user:

          • A way to resize a given vector to the correct size.
          • A way to evaluate, for a given vector $U$, the function $F(U)$. This function is generally called the "residual" operation because the goal is of course to find a point $U^\ast$ for which $F(U^\ast)=0$; if $F(U)$ returns a nonzero vector, then this is the "residual" (i.e., the "rest", or whatever is "left over"). The function that will do this is in essence the same as the computation of the right hand side vector in step-15, but with an important difference: There, the right hand side denoted the negative of the residual, so we have to switch a sign.
          • A way to compute the matrix $J_k$ if that is necessary in the current iteration, along with possibly a preconditioner or other data structures (e.g., a sparse decomposition via SparseDirectUMFPACK if that's what we choose to use to solve a linear system). This operation will generally be called the "setup" operation.
          • A way to solve a linear system $\tilde J_k x = b$ with whatever matrix $\tilde J_k$ was last computed. This operation will generally be called the "solve" operation.

          All of these operations need to be provided to KINSOL by std::function objects that take the appropriate set of arguments and that generally return an integer that indicates success (a zero return value) or failure (a nonzero return value). Specifically, the objects we will access are the SUNDIALS::KINSOL::reinit_vector, SUNDIALS::KINSOL::residual, SUNDIALS::KINSOL::setup_jacobian, and SUNDIALS::KINSOL::solve_with_jacobian member variables. (See the documentation of these variables for their details.) In our implementation, we will use lambda functions to implement these "callbacks" that in turn can call member functions; KINSOL will then call these callbacks whenever its internal algorithms think it is useful.
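          Wiring these up then looks roughly like the following hedged sketch. Note that the callback signatures have changed across deal.II releases (older releases had the callbacks return an int error code, newer ones return void and report failures via exceptions), and the member functions called inside the lambdas are this program's, simplified here:

            SUNDIALS::KINSOL<Vector<double>> nonlinear_solver;

            nonlinear_solver.reinit_vector = [&](Vector<double> &x) {
              x.reinit(dof_handler.n_dofs());
            };

            nonlinear_solver.residual = [&](const Vector<double> &evaluation_point,
                                            Vector<double>       &residual) {
              compute_residual(evaluation_point, residual); // F(U), sign already flipped
            };

            nonlinear_solver.setup_jacobian =
              [&](const Vector<double> &current_u,
                  const Vector<double> & /*current_f*/) {
                compute_and_factorize_jacobian(current_u);
              };

            nonlinear_solver.solve_with_jacobian = [&](const Vector<double> &rhs,
                                                       Vector<double>       &solution,
                                                       const double /*tolerance*/) {
              solve(rhs, solution);
            };

            nonlinear_solver.solve(current_solution); // run the Newton iteration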

          Details of the implementation

          The majority of the code of this tutorial program is as in step-15, and we will not comment on it in much detail. There is really just one aspect one has to pay some attention to, namely how to compute $F(U)$ given a vector $U$ on the one hand, and $J(U)$ given a vector $U$ separately. At first, this seems trivial: We just take the assemble_system() function and in the one case throw out all code that deals with the matrix and in the other case with the right hand side vector. There: Problem solved.

          But it isn't quite as simple. That's because the two are not independent if we have nonzero Dirichlet boundary values, as we do here. The linear system we want to solve contains both interior and boundary degrees of freedom, and when eliminating those degrees of freedom from those that are truly "free", using for example AffineConstraints::distribute_local_to_global(), we need to know the matrix when assembling the right hand side vector.

          Of course, this completely contravenes the original intent: To not assemble the matrix if we can get away without it. We solve this problem as follows:

          • We set the starting guess for the solution vector, $U_0$, to one where boundary degrees of freedom already have their correct values.
          • @@ -477,7 +477,7 @@
             

            Computing the residual vector

            The second part of what assemble_system() used to do in step-15 is computing the residual vector, i.e., the right hand side vector of the Newton linear systems. We have broken this out of the previous function, but the following function will be easy to understand if you understood what assemble_system() in step-15 did. Importantly, however, we need to compute the residual not linearized around the current solution vector, but whatever we get from KINSOL. This is necessary for operations such as line search where we want to know what the residual $F(U^k + \alpha_k \delta U^k)$ is for different values of $\alpha_k$; KINSOL in those cases simply gives us the argument to the function $F$ and we then compute the residual $F(\cdot)$ at this point.

            The function prints the norm of the so-computed residual at the end as a way for us to follow along the progress of the program.

              template <int dim>
              void MinimalSurfaceProblem<dim>::compute_residual(
            /usr/share/doc/packages/dealii/doxygen/deal.II/step_78.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_78.html 2024-11-15 06:44:33.083704214 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_78.html 2024-11-15 06:44:33.083704214 +0000 @@ -167,10 +167,10 @@ K : && \text{Strike price for purchasing asset} \end{align*}

            The way we should interpret this equation is that it is a time-dependent partial differential equation of one "space" variable $S$ as the price of the stock, and $V(S,t)$ is the price of the option at time $t$ if the stock price at that time were $S$.

            Particularities of the equation system

            There are a number of oddities in this equation that are worth discussing before moving on to its numerical solution. First, the "spatial" domain $\Omega\subset\mathbb{R}$ is unbounded, and thus $S$ can be unbounded in value. This is because there may be a practical upper bound for stock prices, but not a conceptual one. The boundary conditions $V(S,t)\rightarrow S$ as $S\rightarrow \infty$ can then be interpreted as follows: What is the value of an option that allows me to buy a stock at price $K$ if the stock price (today or at time $t=T$) is $S\gg K$? One would expect that it is $V\approx S-K$ plus some adjustment for inflation, or, if we really truly consider huge values of $S$, we can neglect $K$ and arrive at the statement that the boundary values at the infinite boundary should be of the form $V\rightarrow S$ as stated above.

            In practice, for us to use a finite element method to solve this, we are going to need to bound $\Omega$. Since this equation describes prices, and it doesn't make sense to talk about prices being negative, we will set the lower bound of $\Omega$ to be 0. Then, for an upper bound, we will choose a very large number, one that $S$ is not very likely to ever get to. We will call this $S_\text{max}$. So, $\Omega=[0,S_\text{max}]$.

            Second, after truncating the domain, we need to ask what boundary values we should pose at this now finite boundary. To take care of this, we use "put-call" parity [stoll1969relationship]. A "put option" is one in which we are allowed, but not required, to sell a stock at price $K$ to someone at a future time $T$. This says

            \begin{align*}
     V(S,t)+Ke^{-r(T-t)}=P(S,t)+S
@@ -182,8 +182,8 @@
 \end{align*}

            and we can use this as a reasonable boundary condition at our finite point $S_\text{max}$.
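            In particular, since a put option becomes essentially worthless for $S \gg K$, setting $P(S_\text{max},t) \approx 0$ in the parity relation yields the concrete boundary value

            \begin{align*}
     V(S_\text{max},t) = S_\text{max}-Ke^{-r(T-t)}.
            \end{align*}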

            The second complication of the Black-Scholes equation is that we are given a final condition, and not an initial condition. This is because we know what the option is worth at time $t=T$: If the stock price at $T$ is $S<K$, then we have no incentive to use our option of buying at price $K$ because we can buy that stock for cheaper on the open market. So $V(S,T)=0$ for $S<K$. On the other hand, if at time $T$ we have $S>K$, then we can buy the stock at price $K$ via the option and immediately sell it again on the market for price $S$, giving us a profit of $S-K$. In other words, $V(S,T)=S-K$ for $S>K$. So, we only know values for $V$ at the end time but not the initial time – in fact, finding out what a fair price at the current time (conventionally taken to be $t=0$) is what solving these equations is all about.

            This means that this is not an equation that is posed going forward in time, but in fact going backward in time. Thus it makes sense to solve this problem in reverse by making the change of variables $\tau=T-t$ where now $\tau$ denotes the time before the strike time $T$.
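            Concretely, with $\tau=T-t$ the chain rule gives

            \begin{align*}
     \frac{\partial V}{\partial t}
     = \frac{\partial V}{\partial \tau}\frac{\partial \tau}{\partial t}
     = -\frac{\partial V}{\partial \tau},
            \end{align*}

            so the sign of the time derivative flips and the final condition at $t=T$ becomes an initial condition at $\tau=0$.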

            With all of this, we finally end up with the following problem:

            \begin{align*}
     &-\frac{\partial V}{\partial \tau} + \frac{\sigma^2S^2}{2} \
@@ -266,7 +266,7 @@
     \textbf{D}_{i,j} = \left(\frac{d\phi_i(S)}{dS}S^2,\frac{d\phi_j(S)}{dS}\right)
 \end{align*}

            So, after adding in the constants and exchanging $V^n$ for $V^{n-1}$ where applicable, we arrive at the following for (2):

            \begin{align*}
     &k_n\int_0^{S_\text{max}}\phi_i(S)\left[\frac{\sigma^2S^2}{2}
         \left[(1-\theta)\
@@ -278,7 +278,7 @@
      -\theta\frac{1}{2}\sigma^2\textbf{D}V^{n}\right]
 \end{align*}

            But, because the matrix $\textbf{B}$ involves an advective term, we will choose $\theta=0$ there – in other words, we use an explicit Euler method to treat advection. Conversely, since the matrix $\textbf{D}$ involves the diffusive term, we will choose $\theta=1/2$ there – i.e., we treat diffusion using the second order Crank-Nicolson method.
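            To see both choices in one formula, recall the generic $\theta$-scheme for a semi-discrete term $\dot V = A V$ over a time step $k_n$:

            \begin{align*}
     V^n = V^{n-1} + k_n\left[(1-\theta)\,A V^{n-1} + \theta\,A V^n\right],
            \end{align*}

            where $\theta=0$ recovers the explicit Euler method used for the advective matrix $\textbf{B}$ and $\theta=\tfrac12$ the Crank-Nicolson method used for the diffusive matrix $\textbf{D}$.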

            So, we arrive at the following:

            \begin{align*}
     k_n\left[-\frac{1}{4}\sigma^2\textbf{D}V^{n-1} \
@@ -304,7 +304,7 @@
     \textbf{A}_{i,j} = \left(S\frac{d\phi_i(S)}{dS}, \phi_j(S)\right)
 \end{align*}

            So, again after adding in the constants and exchanging $V^n$ for $V^{n-1}$ where applicable, we arrive at the following for (3):

            \begin{align*}
     &k_n\int_0^{S_\text{max}}\phi_i(S)\left[rS\left[(1-\theta)
         \frac{dV^{n-1}(S)}{dS} +\
@@ -313,7 +313,7 @@
     -\theta r\textbf{M}V^n -\theta r\textbf{A}V^n\right]
 \end{align*}

            Just as before, we will use $\theta=0$ for the matrix $\textbf{A}$ and $\theta=\frac{1}{2}$ for the matrix $\textbf{M}$. So, we arrive at the following for (3):

            \begin{align*}
     k_n\left[-\frac{1}{2}r\textbf{M}V^{n-1} - \frac{1}{2}r\textbf{M}V^n \
     -r\textbf{A}V^{n-1}\right]
@@ -370,7 +370,7 @@
     &V(S, 0) = -S^2 + 6
 \end{align*}

            Here, $f(S,\tau) = 2\tau - \sigma^2S^2 - 2rS^2 - r(-\tau^2 - S^2 + 6)$. This set-up now has right hand sides for the equation itself and for the boundary conditions at $S=0$ that we did not have before, along with "final" conditions (or, with $\tau$-time "initial conditions") that do not match the real situation. We will implement this in such a way in the code that it is easy to exchange – the introduction of the changes above is just meant to enable the use of a manufactured solution.

            If the program is working correctly, then it should produce (**) as the solution. This does mean that we need to modify our variational form somewhat to account for the non-zero right hand side.

            First, we define the following:

            \begin{align*}
@@ -903,7 +903,7 @@

            void refine_global(const unsigned int times=1)

            BlackScholes::process_solution

            This is where we calculate the convergence and error data to evaluate the effectiveness of the program. Here, we calculate the $L^2$, $H^1$ and $L^{\infty}$ norms.

              template <int dim>
              void BlackScholes<dim>::process_solution()
              {
            @@ -960,7 +960,7 @@
            void integrate_difference(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const ReadVector< Number > &fe_function, const Function< spacedim, Number > &exact_solution, OutVector &difference, const Quadrature< dim > &q, const NormType &norm, const Function< spacedim, double > *weight=nullptr, const double exponent=2.)

            BlackScholes::write_convergence_table

This next part builds the convergence and error tables. For this, we need to configure how the data that was calculated during BlackScholes::process_solution is output. First, we will create the headings and set up the cells properly. During this, we will also prescribe the precision of our results. Then we will write the calculated errors based on the $L^2$, $H^1$, and $L^{\infty}$ norms to the console and to the error LaTeX file.

              template <int dim>
              void BlackScholes<dim>::write_convergence_table()
              {
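    // The remainder of the body is elided in this excerpt. A hedged sketch
    // of the ConvergenceTable calls such a function typically makes (the
    // column names are illustrative assumptions):
    //   convergence_table.set_precision("L2", 3);
    //   convergence_table.set_scientific("L2", true);
    //   convergence_table.evaluate_convergence_rates(
    //     "L2", ConvergenceTable::reduction_rate_log2);
    //   convergence_table.write_text(std::cout);
    //   std::ofstream error_table_file("error.tex");
    //   convergence_table.write_tex(error_table_file);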
            @@ -1102,7 +1102,7 @@
              vmult_result);
             

The second piece is to compute the contributions of the source terms. This corresponds to the term $-k_n\left[\frac{1}{2}F^{n-1} +\frac{1}{2}F^n\right]$. The following code calls VectorTools::create_right_hand_side to compute the vectors $F$, where we set the time of the right hand side (source) function before we evaluate it. The result of this all ends up in the forcing_terms variable:

              RightHandSide<dim> rhs_function(asset_volatility, interest_rate);
              rhs_function.set_time(time);
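A hedged sketch of how this assembly might continue (tmp, forcing_terms, and time_step follow the naming pattern of similar time-stepping tutorials and are assumptions, not lines quoted from the page):

  // Assemble F^n into a scratch vector, then accumulate the
  // theta-weighted source contribution into forcing_terms.
  VectorTools::create_right_hand_side(dof_handler,
                                      QGauss<dim>(fe.degree + 1),
                                      rhs_function,
                                      tmp);
  forcing_terms = tmp;
  forcing_terms *= time_step * theta;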
            @@ -1256,7 +1256,7 @@
            32 1.804e-02 2.00 1.00 1.457e-04 4.10 2.03
            64 9.022e-03 2.00 1.00 3.307e-05 4.41 2.14
            128 4.511e-03 2.00 1.00 5.016e-06 6.59 2.72
What is more interesting is the output of the convergence tables. They are output to the console, as well as into a LaTeX file. The convergence tables are shown above. Here, you can see that the solution has a convergence rate of $\mathcal{O}(h)$ with respect to the $H^1$-norm, and the solution has a convergence rate of $\mathcal{O}(h^2)$ with respect to the $L^2$-norm.

            Below is the visualization of the solution.

            Solution of the MMS problem.

            The plain program

            /* ------------------------------------------------------------------------
            /usr/share/doc/packages/dealii/doxygen/deal.II/step_79.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_79.html 2024-11-15 06:44:33.191705178 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_79.html 2024-11-15 06:44:33.191705178 +0000 @@ -153,198 +153,198 @@

        Introduction

Topology Optimization of Elastic Media is a technique used to optimize a structure that is bearing some load. Ideally, we would like to minimize the maximum stress placed on a structure by selecting a region $E$ where material is placed. In other words,

\[
  \text{minimize}\| \boldsymbol{\sigma} (\mathbf{u}) \|_\infty
\]

\[
  \text{subject to } |E|\leq V_{\max},
\]

\[
  \text{and } \nabla \cdot \boldsymbol{\sigma} + \mathbf{F} = \mathbf{0}.
\]

Here, $\boldsymbol{\sigma} = \mathbf{C} : \boldsymbol{\varepsilon}(\mathbf{u})$ is the stress within the body that is caused by the external forces $\mathbf F$, where we have for simplicity assumed that the material is linear-elastic and so $\mathbf{C}$ is the stress-strain tensor and $\boldsymbol{\varepsilon}(\mathbf{u})=\frac{1}{2} (\nabla \mathbf{u} + (\nabla\mathbf{u})^T)$ is the small-deformation strain as a function of the displacement $\mathbf{u}$ – see step-8 and step-17 for more on linear elasticity. In the formulation above, $V_\text{max}$ is the maximal amount of material we are willing to provide to build the object. The last of the constraints is the partial differential equation that relates stress $\boldsymbol{\sigma}$ and forces $\mathbf F$ and is simply the steady-state force balance.

        That said, the infinity norm above creates a problem: As a function of location of material, this objective function is necessarily not differentiable, making prospects of optimization rather bleak. So instead, a common approach in topology optimization is to find an approximate solution by optimizing a related problem: We would like to minimize the strain energy. This is a measure of the potential energy stored in an object due to its deformation, but also works as a measure of total deformation over the structure.

\[
  \text{minimize  } \int_E \frac{1}{2}\boldsymbol{\sigma} : \boldsymbol{\varepsilon} dV
\]

\[
  \text{subject to } \|E\| \leq V_{\max}
\]

\[
  \text{and } \nabla \cdot \boldsymbol{\sigma} + \mathbf{F} = \mathbf{0}
\]

        The value of the objective function is calculated using a finite element method, where the solution is the displacements. This is placed inside of a nonlinear solver loop that solves for a vector denoting placement of material.

        Solid Isotropic Material with Penalization

In actual practice, we can only build objects in which the material is either present, or not present, at any given point – i.e., we would have an indicator function $\rho_E(\mathbf{x})\in \{0,1\}$ that describes the material-filled region and that we want to find through the optimization problem. In this case, the optimization problem becomes combinatorial, and very expensive to solve. Instead, we use an approach called Solid Isotropic Material with Penalization, or SIMP. [Bendse2004]

The SIMP method is based on an idea of allowing the material to exist in a location with a density $\rho$ between 0 and 1. A density of 0 suggests the material is not there, and it is not a part of the structure, while a density of 1 suggests the material is present. Values between 0 and 1 do not reflect a design we can create in the real-world, but allow us to turn the combinatorial problem into a continuous one. One then looks at density values $\rho$, with the constraint that $0 < \rho_{\min} \leq \rho \leq 1$. The minimum value $\rho_{\min}$, typically chosen to be around $10^{-3}$, avoids the possibility of having an infinite strain energy, but is small enough to provide accurate results.

The straightforward application of the effect of this "density" on the elasticity of the media would be to simply multiply the stiffness tensor $\mathbf{C}_0$ of the medium by the given density, that is, $\mathbf{C} = \rho \mathbf{C}_0$. However, this approach often gives optimal solutions where density values are far from both 0 and 1. As one wants to find a real-world solution, meaning the material either is present or it is not, a penalty is applied to these in-between values. A simple and effective way to do this is to multiply the stiffness tensor by the density raised to some integer power penalty parameter $p$, so that $\mathbf{C} = \rho^p \mathbf{C}_0$. This makes density values farther away from 0 or 1 less effective. It has been shown that using $p=3$ is sufficiently high to create 'black-and-white' solutions: that is, one gets optimal solutions in which material is either present or not present at all points.
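As a concrete illustration of the rule $\mathbf{C} = \rho^p \mathbf{C}_0$, a minimal sketch (the function name and the use of deal.II's SymmetricTensor are ours, not the tutorial's):

  #include <deal.II/base/symmetric_tensor.h>

  #include <cmath>

  // SIMP penalization: scale a base stiffness tensor C0 by rho^p. The text
  // recommends p = 3 to drive designs toward black-and-white solutions.
  template <int dim>
  dealii::SymmetricTensor<4, dim>
  simp_stiffness(const dealii::SymmetricTensor<4, dim> &C0,
                 const double                           rho,
                 const double                           p = 3.)
  {
    return std::pow(rho, p) * C0;
  }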

        More material should always provide a structure with a lower strain energy, and so the inequality constraint can be viewed as an equality where the total volume used is the maximum volume.

        Using this density idea also allows us to reframe the volume constraint on the optimization problem. Use of SIMP then turns the optimization problem into the following:

\[
  \text{minimize  } \int_\Omega \frac{1}{2}\boldsymbol{\sigma}(\rho) : \boldsymbol{\varepsilon}(\rho) d\Omega
\]

\[
  \text{subject to } \int_\Omega \rho(x) d\Omega= V_{\max},
\]

\[
  0<\rho_{\min}\leq \rho(x) \leq 1,
\]

\[
  \nabla \cdot \boldsymbol{\sigma}(\rho) + \mathbf{F} = 0 \quad \text{on } \Omega
\]

        The final constraint, the balance of linear momentum (which we will refer to as the elasticity equation), gives a method for finding $\boldsymbol{\sigma}$ and $\boldsymbol{\varepsilon}$ given the density $\rho$.

        Elasticity Equation

        The elasticity equation in the time independent limit reads

\[
  \nabla \cdot \boldsymbol{\sigma} + \mathbf{F} = \mathbf{0} .
\]

        In the situations we will care about, we will assume that the medium has a linear material response and in that case, we have that

\[
  \boldsymbol{\sigma} = \mathbf{C} : \boldsymbol{\varepsilon} = \rho^p \mathbf{C}_0 : \boldsymbol{\varepsilon}(\mathbf{u})
   = \rho^p \mathbf{C}_0 : \left[\frac{1}{2} (\nabla \mathbf{u} + (\nabla \mathbf{u})^T) \right] .
\]

        In everything we will do below, we will always consider the displacement field $\mathbf{u}$ as the only solution variable, rather than considering $\mathbf{u}$ and $\boldsymbol{\sigma}$ as solution variables (as is done in mixed formulations).

        Furthermore, we will make the assumption that the material is linear isotropic, in which case the stress-strain tensor can be expressed in terms of the Lamé parameters $\lambda,\mu$ such that

\begin{align}
  \boldsymbol{\sigma} &= \rho^p (\lambda \text{tr}(\boldsymbol{\varepsilon}) \mathbf{I} + 2 \mu \boldsymbol{\varepsilon}) , \\
  \sigma_{i,j} &= \rho^p (\lambda \varepsilon_{k,k} \delta_{i,j} + 2 \mu \varepsilon_{i,j}) .
\end{align}

        See step-8 for how this transformation works.

        Integrating the objective function by parts gives

\[
  \int_\Omega \boldsymbol{\sigma}(\rho) : (\nabla \mathbf{u} + (\nabla \mathbf{u})^T)  d\Omega+
  \int_\Omega (\nabla \cdot \boldsymbol{\sigma}(\rho)) \cdot \mathbf{u}  d\Omega=
  \int_{\partial \Omega} \mathbf{t} \cdot \mathbf{u} d\partial\Omega ,
\]

        into which the linear elasticity equation can then be substituted, giving

\[
  \int_\Omega \boldsymbol{\sigma}(\rho) : (\nabla \mathbf{u} + (\nabla \mathbf{u})^T) d\Omega =
  \int_\Omega \mathbf{F}\cdot \mathbf{u} d\Omega+
  \int_{\partial \Omega} \mathbf{t} \cdot \mathbf{u} d\partial\Omega .
\]

        Because we are assuming no body forces, this simplifies further to

\[
  \int_\Omega \boldsymbol{\sigma}(\rho) : (\nabla \mathbf{u} + (\nabla \mathbf{u})^T) d\Omega
  = \int_{\partial \Omega} \mathbf{t} \cdot \mathbf{u} d\partial\Omega,
\]

        which is the final form of the governing equation that we'll be considering from this point forward.

        Making the solution mesh-independent

        Typically, the solutions to topology optimization problems are mesh-dependent, and as such the problem is ill-posed. This is because fractal structures are often formed as the mesh is refined further. As the mesh gains resolution, the optimal solution typically gains smaller and smaller structures. There are a few competing workarounds to this issue, but the most popular for first order optimization is the sensitivity filter, while second order optimization methods tend to prefer use of a density filter.

As the filters affect the gradient and Hessian of the strain energy (i.e., the objective function), the choice of filter has an effect on the solution of the problem. The density filter as part of a second order method works by introducing an unfiltered density, which we refer to as $\varrho$, and then requiring that the density be a convolution of the unfiltered density:

\[
  \rho = H(\varrho).
\]

Here, $H$ is an operator so that $\rho(\mathbf{x})$ is some kind of average of the values of $\varrho$ in the area around $\mathbf{x}$ – i.e., it is a smoothed version of $\varrho$.

        This prevents checkerboarding; the radius of the filter allows the user to define an effective minimal beam width for the optimal structures we seek to find.

        Checkerboarding occurring in an MBB Beam
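To make the role of the filter radius concrete, here is a minimal sketch of a cone-weighted density filter acting on cell-center data (plain std::vector stands in for the mesh data structures; every name here is illustrative):

  #include <cmath>
  #include <cstddef>
  #include <vector>

  // rho_i = sum_j w_ij * varrho_j / sum_j w_ij with linear "cone" weights
  // w_ij = max(radius - distance(i,j), 0); shown in 1d for brevity.
  std::vector<double> apply_density_filter(const std::vector<double> &varrho,
                                           const std::vector<double> &centers,
                                           const double               radius)
  {
    std::vector<double> rho(varrho.size(), 0.);
    for (std::size_t i = 0; i < varrho.size(); ++i)
      {
        double weight_sum = 0.;
        for (std::size_t j = 0; j < varrho.size(); ++j)
          {
            const double distance = std::abs(centers[i] - centers[j]);
            if (distance < radius)
              {
                const double weight = radius - distance;
                rho[i] += weight * varrho[j];
                weight_sum += weight;
              }
          }
        rho[i] /= weight_sum; // safe: the i == j term always contributes
      }
    return rho;
  }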

        Complete Problem Formulation

        The minimization problem is now

\[
  \min_{\rho,\varrho,\mathbf{u}} \int_{\partial\Omega} \mathbf{u} \cdot \mathbf{t} d\partial\Omega
\]

\[
  \text{subject to   } \rho = H(\varrho)
\]

\[
  \int_\Omega \rho^p \left(\frac{\mu}{2}\left(\boldsymbol{\varepsilon}(\mathbf{v}):
  \boldsymbol{\varepsilon}(\mathbf{u})\right) + \lambda \left( \nabla \cdot \mathbf{u} \nabla
  \cdot \mathbf{v} \right)  \right) d\Omega = \int_{\partial \Omega} \mathbf{v} \cdot
  \mathbf{t} d\partial\Omega
\]

\[
  \int_\Omega \rho d\Omega= V
\]

\[
  0\leq \varrho \leq 1
\]

The inequality constraints are dealt with by first introducing slack variables, and second using log barriers to ensure that we obtain an interior-point method. The penalty parameter is going to be $\alpha$, and the following slack variables are

1. $s_1$ - a slack variable corresponding to the lower bound

/usr/share/doc/packages/dealii/doxygen/deal.II/step_8.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_8.html 2024-11-15 06:44:33.239705607 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_8.html 2024-11-15 06:44:33.239705607 +0000 @@ -140,51 +140,51 @@
            Note
            The material presented here is also discussed in video lecture 19. (All video lectures are also available here.)

            In this tutorial program we will want to solve the elastic equations. They are an extension to Laplace's equation with a vector-valued solution that describes the displacement in each space direction of a rigid body which is subject to a force. Of course, the force is also vector-valued, meaning that in each point it has a direction and an absolute value.

            One can write the elasticity equations in a number of ways. The one that shows the symmetry with the Laplace equation in the most obvious way is to write it as

\[
  -
  \text{div}\,
  ({\mathbf C} \nabla \mathbf{u})
  =
  \mathbf f,
\]

where $\mathbf u$ is the vector-valued displacement at each point, $\mathbf f$ the force, and ${\mathbf C}$ is a rank-4 tensor (i.e., it has four indices) that encodes the stress-strain relationship – in essence, it represents the "spring constant" in Hooke's law that relates the displacement to the forces. ${\mathbf C}$ will, in many cases, depend on $\mathbf x$ if the body whose deformation we want to simulate is composed of different materials.

            While the form of the equations above is correct, it is not the way they are usually derived. In truth, the gradient of the displacement $\nabla\mathbf u$ (a matrix) has no physical meaning whereas its symmetrized version,

\[
  \varepsilon(\mathbf u)_{kl} =\frac{1}{2}(\partial_k u_l + \partial_l u_k),
\]

does and is typically called the "strain". (Here and in the following, $\partial_k=\frac{\partial}{\partial x_k}$. We will also use the Einstein summation convention that whenever the same index appears twice in an equation, summation over this index is implied; we will, however, not distinguish between upper and lower indices.) With this definition of the strain, the elasticity equations then read as

\[
  -
  \text{div}\,
  ({\mathbf C} \varepsilon(\mathbf u))
  =
  \mathbf f,
\]

which you can think of as the more natural generalization of the Laplace equation to vector-valued problems. (The form shown first is equivalent to this form because the tensor ${\mathbf C}$ has certain symmetries, namely that $C_{ijkl}=C_{ijlk}$, and consequently ${\mathbf C} \varepsilon(\mathbf u)_{kl} = {\mathbf C} \nabla\mathbf u$.)

            One can of course alternatively write these equations in component form:

\[
  -
  \partial_j (c_{ijkl} \varepsilon_{kl})
  =
  f_i,
  \qquad
  i=1\ldots d.
\]

In many cases, one knows that the material under consideration is isotropic, in which case by introduction of the two coefficients $\lambda$ and $\mu$ the coefficient tensor reduces to

\[
  c_{ijkl}
  =
  \lambda \delta_{ij} \delta_{kl} +
  \mu (\delta_{ik} \delta_{jl} + \delta_{il} \delta_{jk}).
\]

            The elastic equations can then be rewritten in much simpler a form:

\[
   -
   \nabla \lambda (\nabla\cdot {\mathbf u})
   -
@@ -193,10 +193,10 @@
   \nabla\cdot \mu (\nabla {\mathbf u})^T
   =
   {\mathbf f},
\]

            and the respective bilinear form is then

\[
  a({\mathbf u}, {\mathbf v}) =
  \left(
    \lambda \nabla\cdot {\mathbf u}, \nabla\cdot {\mathbf v}
@@ -211,10 +211,10 @@
  \left(
    \mu \partial_k u_l, \partial_l v_k
  \right)_\Omega,
\]

            or also writing the first term a sum over components:

\[
  a({\mathbf u}, {\mathbf v}) =
  \sum_{k,l}
  \left(
@@ -230,18 +230,18 @@
  \left(
    \mu \partial_k u_l, \partial_l v_k
  \right)_\Omega.
\]

Note
As written, the equations above are generally considered to be the right description for the displacement of three-dimensional objects if the displacement is small and we can assume that Hooke's law is valid. In that case, the indices $i,j,k,l$ above all run over the set $\{1,2,3\}$ (or, in the C++ source, over $\{0,1,2\}$). However, as is, the program runs in 2d, and while the equations above also make mathematical sense in that case, they would only describe a truly two-dimensional solid. In particular, they are not the appropriate description of an $x-y$ cross-section of a body infinite in the $z$ direction; this is in contrast to many other two-dimensional equations that can be obtained by assuming that the body has infinite extent in $z$-direction and that the solution function does not depend on the $z$ coordinate. On the other hand, there are equations for two-dimensional models of elasticity; see for example the Wikipedia article on plane strain, antiplane shear and plane stress.

            But let's get back to the original problem. How do we assemble the matrix for such an equation? A very long answer with a number of different alternatives is given in the documentation of the Handling vector valued problems topic. Historically, the solution shown below was the only one available in the early years of the library. It turns out to also be the fastest. On the other hand, if a few per cent of compute time do not matter, there are simpler and probably more intuitive ways to assemble the linear system than the one discussed below but that weren't available until several years after this tutorial program was first written; if you are interested in them, take a look at the Handling vector valued problems topic.

Let us go back to the question of how to assemble the linear system. The first thing we need is some knowledge about how the shape functions work in the case of vector-valued finite elements. Basically, this comes down to the following: let $n$ be the number of shape functions for the scalar finite element of which we build the vector element (for example, we will use bilinear functions for each component of the vector-valued finite element, so the scalar finite element is the FE_Q(1) element which we have used in previous examples already, and $n=4$ in two space dimensions). Further, let $N$ be the number of shape functions for the vector element; in two space dimensions, we need $n$ shape functions for each component of the vector, so $N=2n$. Then, the $i$th shape function of the vector element has the form

\[
  \Phi_i({\mathbf x}) = \varphi_{\text{base}(i)}({\mathbf x})\ {\mathbf e}_{\text{comp}(i)},
\]

where $e_l$ is the $l$th unit vector, $\text{comp}(i)$ is the function that tells us which component of $\Phi_i$ is the one that is nonzero (for each vector shape function, only one component is nonzero, and all others are zero). $\varphi_{\text{base}(i)}(x)$ describes the space dependence of the shape function, which is taken to be the $\text{base}(i)$-th shape function of the scalar element. Of course, while $i$ is in the range $0,\ldots,N-1$, the functions $\text{comp}(i)$ and $\text{base}(i)$ have the ranges $0,1$ (in 2D) and $0,\ldots,n-1$, respectively.

            For example (though this sequence of shape functions is not guaranteed, and you should not rely on it), the following layout could be used by the library:

\begin{eqnarray*}
  \Phi_0({\mathbf x}) &=&
  \left(\begin{array}{c}
    \varphi_0({\mathbf x}) \\ 0
@@ -262,44 +262,44 @@
    0 \\ \varphi_1({\mathbf x})
  \end{array}\right),
  \ldots
\end{eqnarray*}

            where here

\[
  \text{comp}(0)=0, \quad  \text{comp}(1)=1, \quad  \text{comp}(2)=0, \quad  \text{comp}(3)=1, \quad  \ldots
\]

\[
  \text{base}(0)=0, \quad  \text{base}(1)=0, \quad  \text{base}(2)=1, \quad  \text{base}(3)=1, \quad  \ldots
\]
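These two index maps are exactly what deal.II exposes through FiniteElement::system_to_component_index(); a small self-contained sketch (the element mirrors the text, the rest is illustrative):

  #include <deal.II/fe/fe_q.h>
  #include <deal.II/fe/fe_system.h>

  #include <iostream>

  int main()
  {
    constexpr int dim = 2;
    // The vector-valued element built from dim copies of FE_Q(1):
    const dealii::FESystem<dim> fe(dealii::FE_Q<dim>(1), dim);
    for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
      {
        // .first is comp(i) and .second is base(i) in the notation above.
        const auto pair = fe.system_to_component_index(i);
        std::cout << "Phi_" << i << ": comp = " << pair.first
                  << ", base = " << pair.second << '\n';
      }
  }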

In all but very rare cases, you will not need to know which shape function $\varphi_{\text{base}(i)}$ of the scalar element belongs to a shape function $\Phi_i$ of the vector element. Let us therefore define

\[
  \phi_i = \varphi_{\text{base}(i)}
\]

            by which we can write the vector shape function as

\[
  \Phi_i({\mathbf x}) = \phi_{i}({\mathbf x})\ {\mathbf e}_{\text{comp}(i)}.
\]

You can now safely forget about the function $\text{base}(i)$, at least for the rest of this example program.

Now using these vector shape functions, we can write the discrete finite element solution as

\[
  {\mathbf u}_h({\mathbf x}) =
  \sum_i \Phi_i({\mathbf x})\ U_i
\]

with scalar coefficients $U_i$. If we define an analog function ${\mathbf v}_h$ as test function, we can write the discrete problem as follows: Find coefficients $U_i$ such that

\[
  a({\mathbf u}_h, {\mathbf v}_h) = ({\mathbf f}, {\mathbf v}_h)
  \qquad
  \forall {\mathbf v}_h.
\]

If we insert the definition of the bilinear form and the representation of ${\mathbf u}_h$ and ${\mathbf v}_h$ into this formula:

\begin{eqnarray*}
  \sum_{i,j}
    U_i V_j
  \sum_{k,l}
@@ -324,11 +324,11 @@
    f_l,
    (\Phi_j)_l
  \right)_\Omega.
\end{eqnarray*}

We note that here and in the following, the indices $k,l$ run over spatial directions, i.e. $0\le k,l < d$, and that indices $i,j$ run over degrees of freedom.

            The local stiffness matrix on cell $K$ therefore has the following entries:

\[
  A^K_{ij}
  =
  \sum_{k,l}
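The excerpt breaks off in the middle of this formula. For orientation, step-8 turns entries of this kind into the following well-known assembly loop (a sketch of the tutorial pattern; fe_values, cell_matrix, lambda_values, and mu_values are assumed to be set up on the current cell):

  for (const unsigned int i : fe_values.dof_indices())
    {
      const unsigned int comp_i = fe.system_to_component_index(i).first;
      for (const unsigned int j : fe_values.dof_indices())
        {
          const unsigned int comp_j = fe.system_to_component_index(j).first;
          for (const unsigned int q : fe_values.quadrature_point_indices())
            cell_matrix(i, j) +=
              // the (lambda d_i u_i, d_j v_j) and (mu d_j u_i, d_i v_j) terms:
              ((fe_values.shape_grad(i, q)[comp_i] *
                fe_values.shape_grad(j, q)[comp_j] * lambda_values[q]) +
               (fe_values.shape_grad(i, q)[comp_j] *
                fe_values.shape_grad(j, q)[comp_i] * mu_values[q]) +
               // the (mu nabla u, nabla v) term couples equal components only:
               ((comp_i == comp_j) ?
                  (fe_values.shape_grad(i, q) * fe_values.shape_grad(j, q) *
                   mu_values[q]) :
                  0.)) *
              fe_values.JxW(q);
        }
    }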
/usr/share/doc/packages/dealii/doxygen/deal.II/step_81.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_81.html	2024-11-15 06:44:33.303706178 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_81.html	2024-11-15 06:44:33.307706214 +0000
@@ -160,8 +160,8 @@

            Time-Harmonic Maxwell's Equations with interface conditions

            We start the discussion with a short derivation of the governing equations and some literature references.

            Derivation of time-harmonic Maxwell's equations

In two ( $d=2$) or three ( $d=3$) spatial dimensions, the time evolution of an electromagnetic wave $(\mathbf{E},\mathbf{H})$ that consists of an electric field component $\mathbf{E}(t,\mathbf{x})\;:\;\mathbb{R}\times\mathbb{R}^d\to\mathbb{R}^d$ and a magnetic field component $\mathbf{H}(t,\mathbf{x})\;:\;\mathbb{R}\times\mathbb{R}^d\to\mathbb{R}^d$ is described by Maxwell's equations [Schwartz1972], [Monk2003]:

\begin{align*}
  \frac{\partial}{\partial t} \mathbf{H} + \nabla \times \mathbf{E} &= -\mathbf{M}_a,
  \\
  \nabla \cdot \mathbf{H} &= \rho_m,
  \\
  \frac{\partial}{\partial t} (\varepsilon\mathbf{E}) - \nabla\times(\mu^{-1}\mathbf{H}) &= - \mathbf{J}_a,
  \\
  \nabla\cdot(\varepsilon\mathbf{E}) &= \rho.
\end{align*}

Here, $\nabla\times$ is the curl operator, $\nabla\cdot$ is the divergence operator, $\varepsilon$ is the electric permittivity, $\mu$ is the magnetic permeability, $\rho$ is the electric charge density, and $\rho_m$ is a corresponding (hypothetical) magnetic monopole density. $\mathbf{J}_a$ and $\mathbf{M}_a$ are the electric and magnetic flux densities which are related to their respective charge densities by the conservation equations [Schwartz1972]

\[
  \frac{\partial}{\partial t} \rho + \nabla\cdot\mathbf{J}_a = 0
  \text{ and }
  \frac{\partial}{\partial t} \rho_m + \nabla\cdot\mathbf{M}_a = 0.
\]

We now make the important assumption that the material parameters $\varepsilon$ and $\mu$ are time-independent and that the fields $\mathbf{E}$ and $\mathbf{H}$, the fluxes $\mathbf{M}_a$ and $\mathbf{J}_a$, as well as the densities $\rho$ and $\rho_m$ are all time-harmonic, i.e., their time evolution is completely described by

\[
  \mathbf{F}(\mathbf{x},t) = \text{Re}\{e^{-i\omega
  t}\tilde{\mathbf{F}}(\mathbf{x})\},
\]

in which $\omega$ is the temporal angular frequency and $\tilde{\mathbf{F}}(\mathbf{x})$ is a corresponding complex-valued vector field (or density). Inserting this ansatz into Maxwell's equations, substituting the charge conservation equations and some minor algebra then yields the so-called time-harmonic Maxwell's equations:

\begin{align*}
  -i\omega \tilde{\mathbf{H}} + \nabla \times \tilde{\mathbf{E}} &=
  -\tilde{\mathbf{M}}_a,
  \\
@@ -197,67 +197,67 @@
  \\
  \nabla\cdot(\varepsilon\tilde{\mathbf{E}}) &=
  \frac{1}{i\omega}\nabla\cdot\tilde{\mathbf{J}}_a.
\end{align*}

For the sake of better readability we will now drop the tilde and simply write $\mathbf{E}(\mathbf{x})$, $\mathbf{H}(\mathbf{x})$, etc., when referring to the time-harmonic fields.

            Jump conditions on lower dimensional interfaces

Graphene is a two-dimensional carbon allotrope with a single atom layer that is arranged in a honeycomb lattice [Geim2004]. Due to its atomic thickness it is an example of a so-called 2D material: Compared to the other spatial dimensions (where graphene samples can reach up to several centimeters) the atomistic thickness of graphene typically ranges around 2.5 ångstrom ( $2.5\times10^{-10}\text{m}$). We will thus model graphene as a lower-dimensional interface $\Sigma$ embedded into the computational domain $\Omega\subset\mathbb{R}^d$. More precisely, $\Sigma$ is a two-dimensional sheet in three spatial dimensions, or a one-dimensional line in two spatial dimensions. The special electronic structure of graphene gives rise to a current density on the lower-dimensional interface that is modeled with an effective surface conductivity $\sigma^\Sigma$ obeying Ohm's Law:

\[
  \mathbf{J}^\Sigma=\sigma^\Sigma\,\mathbf{E}_T
\]

in which $\mathbf{J}^\Sigma$ is the surface current density, $\mathbf{E}_T$ denotes the tangential part of the electric field $\mathbf{E}$, and $\sigma^\Sigma$ is an appropriately chosen surface conductivity that will be discussed in more detail below. The surface current density gives rise to a jump condition on $\Sigma$ in the tangential component of the magnetic field. This is best seen by visualizing Ampère's law:

            Visualization of Ohm's law and Ampère's law leading to a jump condition over the interface

and then taking the limit of the upper and lower part of the line integral approaching the sheet. In contrast, the tangential part of the electric field is continuous. By fixing a unit normal $\mathbf{\nu}$ on the hypersurface $\Sigma$ both jump conditions are

\begin{align*}
  \mathbf{\nu} \times \left[(\mu^{-1}\mathbf{H})^+ - (\mu^{-1}\mathbf{H})^-\right]|_{\Sigma}
  &= \sigma^{\Sigma}\left[(\mathbf{\nu}\times \mathbf{E}\times \mathbf{\nu})\right]|_{\Sigma},
  \\
  \mathbf{\nu} \times \left[\mathbf{E}^+ - \mathbf{E}^-\right]|_{\Sigma} &= 0.
\end{align*}

The notation $\mathbf{F}^\pm$ indicates the limit values of the field when approaching the interface from above or below the interface: $\mathbf{F}^\pm(\mathbf{x})=\lim_{\delta\to0,\delta>0}\mathbf{F}(\mathbf{x}\pm\delta\mathbf{\nu})$.

            Rescaling

            We will be using a rescaled version of the Maxwell's equations described above. The rescaling has the following key differences:

1. Every length is rescaled by the free-space wavelength $2\pi k^{-1} \dealcoloneq 2\pi(\omega\sqrt{\varepsilon_0\mu_0})^{-1}$, in which $\varepsilon_0$ and $\mu_0$ denote the vacuum dielectric permittivity and magnetic permeability, respectively.
2. $\mathbf{E}$, $\mathbf{H}$, $\mathbf{J}_a$, $\mathbf{M}_a$ are all rescaled by typical electric current strength $J_0$, i.e., the strength of the prescribed dipole source at location $a$ in the $e_i$ direction in Cartesian coordinates (here, $\delta$ is the Dirac delta operator).

   \[
     \mathbf{J}_a = J_0 \mathbf{e}_i\delta(x-a)
   \]

Accordingly, our electric permittivity and magnetic permeability are rescaled by $\varepsilon_0$ and $\mu_0$ as

\[
  \mu_r = \frac{1}{\mu_0}\mu
  \text{ and }
  \varepsilon_r = \frac{1}{\varepsilon_0}\varepsilon.
\]

We use the free-space wave number $k_0 = \omega\sqrt{\varepsilon_0\mu_0}$ and the dipole strength $J_0$ to arrive at the following rescaling of the vector fields and coordinates:

\begin{align*}
  \hat{x} = k_0x, &\qquad
  \hat{\nabla} = \frac{1}{k_0}\nabla,\\
  \hat{\mathbf{H}} = \frac{k_0}{J_0}\mu^{-1}\mathbf{H},&\qquad
  \hat{\mathbf{E}} = \frac{k_0^2}{\omega\mu_0 J_0}\mathbf{E},\\
  \hat{\mathbf{J}}_a = \frac{1}{J_0}\mathbf{J}_a,&\qquad
  \hat{\mathbf{M}}_a = \frac{k_0}{\omega\mu_0 J_0}\mathbf{M}_a.
\end{align*}

            Finally, the interface conductivity is rescaled as

\[
  \sigma^{\Sigma}_r = \sqrt{\frac{\mu_0}{\varepsilon_0}}\sigma^{\Sigma}.
\]

            Accordingly, our rescaled equations are

\begin{align*}
  -i\mu_r \hat{\mathbf{H}} + \hat{\nabla} \times \hat{\mathbf{E}}
  &= -\hat{\mathbf{M}}_a,
  \\
@@ -269,19 +269,19 @@
  \\
  \nabla\cdot(\varepsilon\mathbf{E}) &= \frac{1}{i\omega}\hat{\nabla}
  \cdot\hat{\mathbf{J}}_a.
\end{align*}

            We will omit the hat in further discussion for ease of notation.

            Variational Statement

Let $\Omega \subset \mathbb{R}^n$, $(n = 2,3)$ be a simply connected and bounded domain with Lipschitz-continuous and piecewise smooth boundary, $\partial\Omega$. Let $\Sigma$ be an oriented, Lipschitz-continuous, piecewise smooth hypersurface. Fix a normal field $\nu$ on $\Sigma$ and let $n$ denote the outer normal vector on $\partial\Omega$.

In order to arrive at the variational form, we will substitute for $\mathbf{H}$ in the first equation and obtain

\[
  \nabla \times (\mu_r^{-1}\nabla\times\mathbf{E}) - \varepsilon_r \mathbf{E}
  = i\mathbf{J}_a - \nabla\times (\mu_r^{-1}\mathbf{M}_a).
\]

Now, consider a smooth test function $\varphi$ with complex conjugate $\bar{\varphi}$. Multiply both sides of the above equation by $\bar{\varphi}$ and integrate by parts in $\Omega\backslash\Sigma$.

\[
  \int_\Omega (\mu_r^{-1}\nabla\times\mathbf{E})\cdot (\nabla\times\bar{\varphi})\;\text{d}x
  - \int_\Omega \varepsilon_r\mathbf{E} \cdot \bar{\varphi}\;\text{d}x
  - \int_\Sigma [\nu \times (\mu_r^{-1}\nabla\times\mathbf{E} +
@@ -290,32 +290,32 @@
  \mu^{-1}\mathbf{M}_a)) \cdot \bar{\varphi}_T\;\text{d}o_x =
  i\int_\Omega \mathbf{J}_a \cdot \bar{\varphi}\;\text{d}x
  - \int_\Omega \mu_r^{-1}\mathbf{M}_a \cdot (\nabla \times \bar{\varphi})\;\text{d}x.
\]

We use the subscript $T$ to denote the tangential part of the given vector and $[\cdot]_{\Sigma}$ to denote a jump over $\Sigma$, i.e.,

\[
  \mathbf{F}_T = (\mathbf{\nu}\times \mathbf{F})\times\mathbf{\nu}
  \text{ and }
  [\mathbf{F}]_{\Sigma}(\mathbf{x}) = \lim\limits_{s\searrow 0}(\mathbf{F}(\mathbf{x}+s\mathbf{\nu})-\mathbf{F}(\mathbf{x}-s\mathbf{\nu}))
\]

for $\mathbf{x}\in \Sigma$.
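The projection just defined, $\mathbf{F}_T=(\mathbf{\nu}\times\mathbf{F})\times\mathbf{\nu}$, translates directly into code; a minimal sketch (the function name is ours):

  #include <deal.II/base/tensor.h>

  // F_T = (nu x F) x nu at an interface point, via the 3d cross product.
  dealii::Tensor<1, 3> tangential_part(const dealii::Tensor<1, 3> &F,
                                       const dealii::Tensor<1, 3> &nu)
  {
    return dealii::cross_product_3d(dealii::cross_product_3d(nu, F), nu);
  }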

For the computational domain $\Omega$, we introduce the absorbing boundary condition at $\partial\Omega$, which is obtained by using a first-order approximation of the Silver-Müller radiation condition, truncated at $\partial\Omega$ [Monk2003].

\[
  \nu\times\mathbf{H}+\sqrt{\mu_r^{-1}\varepsilon_r}\mathbf{E}=0\qquad x\in\partial\Omega
\]

We assume that $\mu_r^{-1}$ and $\varepsilon$ have well-defined square roots. In our numerical computation, we combine the above absorbing boundary condition with a PML.

            The jump condition can be expressed as a weak discontinuity as follows:

\[
  [\nu \times (\mu_r^{-1}\nabla\times\mathbf{E} + \mu^{-1}\mathbf{M}_a)]_{\Sigma}
  = i\sigma_r^{\Sigma}\mathbf{E}_T,\qquad \text{on }\Sigma\\
  \nu \times (\mu_r^{-1}\nabla\times\mathbf{E} + \mu^{-1}\mathbf{M}_a)
/usr/share/doc/packages/dealii/doxygen/deal.II/step_82.html differs (HTML document, UTF-8 Unicode text, with very long lines)
--- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_82.html	2024-11-15 06:44:33.395707000 +0000
+++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_82.html	2024-11-15 06:44:33.395707000 +0000
@@ -169,21 +169,21 @@
 u & = 0 \quad \mbox{on } \partial\Omega,
 \end{align*}

where $\Omega\subset\mathbb{R}^d$ $(d=2,3)$ is an open bounded Lipschitz domain and $f\in L^2(\Omega)$. This is the same problem we have already considered in step-47, but we will take here a different approach towards solving it: Rather than using continuous finite elements and the interior penalty method, we consider discontinuous finite elements and the local discontinuous Galerkin method defined using lifting operators.

            The weak formulation of this problem reads as follows: find $u\in H_0^2(\Omega)$ such that

            \[
 \int_{\Omega}D^2u:D^2v = \int_{\Omega}fv \qquad \forall \, v\in H_0^2(\Omega),
 \]

where $D^2v$ denotes the Hessian of $v$ and $H_0^2(\Omega)\dealcoloneq\{v\in H^2(\Omega): \,\, v=0 \mbox{ and } \nabla v=\mathbf{0} \,\, \mbox{ on } \partial\Omega\}$. Using so-called lifting operators as well as the Nitsche approach to impose the homogeneous Dirichlet boundary conditions, the LDG approximation of this problem consists of replacing the Hessians by discrete Hessians (see below) and adding penalty terms involving properly scaled jump terms. In particular, the versatility of the method described below is of particular interest for nonlinear problems or problems with intricate weak formulations for which the design of discrete algorithms is challenging.

            Discretization

            Finite element spaces

For $h>0$, let $\mathcal{T}_h$ be a partition of $\Omega$ into quadrilateral (hexahedral if $d=3$) elements $K$ of diameter $h_{K}\leq h$ and let $\mathcal{E}_h=\mathcal{E}_h^0\cup\mathcal{E}_h^b$ denote the set of (interior and boundary) faces. We restrict the discussion to conforming subdivisions to avoid technicalities already addressed in previous tutorials. The diameter of $e \in \mathcal{E}_h$ is denoted $h_e$. For any integer $k\ge 2$, we introduce the (discontinuous) finite element space

            \[
 \mathbb{V}_h\dealcoloneq\left\{v_h\in L^2(\Omega): \,\, v_h|_K\circ F_{K}\in\mathbb{Q}_k \quad \forall \, K \in\mathcal{T}_h \right\},
 \]

            where $F_{K}$ is the map from the reference element $\hat{K}$ (unit square/cube) to the physical element $K$. For $v_h\in\mathbb{V}_h$, the piecewise differential operators are denoted with a subscript $h$, for instance $\nabla_h v_h|_K=\nabla(v_h|_K)$ and $D_h^2 v_h=\nabla_h\nabla_h v_h$. For $e\in\mathcal{E}_h$, we assign a normal $\mathbf{n}_e$. The choice of normal is irrelevant except that when $e$ is a boundary face, $\mathbf{n}_e$ is the normal pointing outward from $\Omega$.
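
            As a hedged sketch (not code from the tutorial itself), the space $\mathbb{V}_h$ can be set up with FE_DGQ elements as follows; the mesh and refinement level are placeholders:

              #include <deal.II/grid/tria.h>
              #include <deal.II/grid/grid_generator.h>
              #include <deal.II/dofs/dof_handler.h>
              #include <deal.II/fe/fe_dgq.h>
             
              // Minimal sketch: a discontinuous Q_k space on a uniformly refined
              // unit cube; no continuity is enforced across element faces.
              template <int dim>
              void make_discontinuous_space()
              {
                dealii::Triangulation<dim> triangulation;
                dealii::GridGenerator::hyper_cube(triangulation, 0., 1.);
                triangulation.refine_global(3);
             
                const unsigned int        k = 2; // polynomial degree, k >= 2
                const dealii::FE_DGQ<dim> fe(k);
                dealii::DoFHandler<dim>   dof_handler(triangulation);
                dof_handler.distribute_dofs(fe);
              }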

            Jumps, averages, and discrete reconstruction of differential operators

            The piecewise differential operators do not have enough information to be accurate approximations of their continuous counterparts. They are missing inter-element information.

            This leads to the introduction of jump and average operators:

            \[
 \jump{v_h}|_e \dealcoloneq \left\{\begin{array}{ll}
 v_h|_{K_1}-v_h|_{K_2} & e\in\mathcal{E}_h^0 \\
 v_h|_{K_1} & e\in\mathcal{E}_h^b
 \end{array}\right.
 \qquad\mbox{and}\qquad
 \average{v_h}|_e \dealcoloneq \left\{\begin{array}{ll}
 \frac{1}{2}\left(v_h|_{K_1}+v_h|_{K_2}\right) & e\in\mathcal{E}_h^0 \\
 v_h|_{K_1} & e\in\mathcal{E}_h^b,
 \end{array}\right.
 \]

            respectively, where $K_1$ and $K_2$ are the two elements adjacent to $e$ so that $\mathbf{n}_e$ points from $K_1$ to $K_2$ (with obvious modification when $e$ is a boundary edge). These are the same operators that we have previously used not only in step-47, but also in other tutorials related to discontinuous Galerkin methods (e.g., step-12).

            With these notations, we are now in a position to define the discrete/reconstructed Hessian $H_h(v_h)\in\left[L^2(\Omega)\right]^{d\times d}$ of $v_h\in\mathbb{V}_h$; that is, something that will take the role of $D^2 v$ in the definition of the weak formulation above when moving from the continuous to the discrete formulation. We first consider two local lifting operators $r_e:[L^2(e)]^d\rightarrow[\mathbb{V}_h]^{d\times d}$ and $b_e:L^2(e)\rightarrow[\mathbb{V}_h]^{d\times d}$ defined for $e\in\mathcal{E}_h$ by, respectively,

            \[
 r_e\left(\boldsymbol{\phi}\right) \in [\mathbb{V}_h]^{d\times d}: \,
 \int_{\Omega} \tau_h : r_e\left(\boldsymbol{\phi}\right) = \int_e\average{\tau_h}\mathbf{n}_e\cdot\boldsymbol{\phi} \qquad \forall \, \tau_h\in [\mathbb{V}_h]^{d\times d}
 \]

            and

            \[
 b_e(\phi) \in [\mathbb{V}_h]^{d\times d}: \,
 \int_{\Omega} \tau_h : b_e(\phi) = \int_e\average{{\rm div}\, \tau_h}\cdot\mathbf{n}_e\phi \qquad \forall \, \tau_h\in [\mathbb{V}_h]^{d\times d}.
 \]

            We have ${\rm supp}\,(r_e\left(\boldsymbol{\phi}\right))={\rm supp}\,(b_e(\phi))=\omega_e$, where $\omega_e$ denotes the patch of (one or two) elements having $e$ as part of their boundaries.

            The discrete Hessian operator $H_h:\mathbb{V}_h\rightarrow\left[L^2(\Omega)\right]^{d\times d}$ is then given by

            \[
 H_h(v_h) \dealcoloneq D_h^2 v_h -R_h(\jump{\nabla_h v_h})+B_h(\jump{v_h}) \dealcoloneq D_h^2 v_h - \sum_{e\in\mathcal{E}_h}r_e\left(\jump{\nabla_h v_h}\right)+\sum_{e\in\mathcal{E}_h}b_e\left(\jump{v_h}\right).
 \]

            The discrete Hessian is designed to satisfy the weak convergence property

            \[
 \int_{\Omega}H_h(v_h):\tau\longrightarrow \int_{\Omega}D^2v:\tau \qquad \mbox{as } \,\, h\rightarrow 0
 \]

            for any sequence $\{v_h\}_{h>0}$ in $\mathbb{V}_h$ such that $v_h\rightarrow v$ in $L^2(\Omega)$ as $h\rightarrow 0$ for some $v\in H^2(\Omega)$. Let $\tau\in [C_0^{\infty}(\Omega)]^{d\times d}$. Integrating by parts twice we get

            \[
 \int_{\Omega}D^2v:\tau = -\int_{\Omega}\nabla v\cdot \mbox{div}(\tau) = \int_{\Omega}v \, \mbox{div}(\mbox{div}(\tau)).
 \]

            Similarly, integrating by parts twice over a single element $K$ yields

            \[
 \int_K v_h \, \mbox{div}(\mbox{div}(\tau)) = -\int_K \nabla v_h\cdot \mbox{div}(\tau) + \int_{\partial K} v_h \, \mbox{div}(\tau)\cdot \mathbf{n}_K =\int_K D^2v_h:\tau - \int_{\partial K}\nabla v_h\cdot (\tau\mathbf{n}_K) + \int_{\partial K} v_h \, \mbox{div}(\tau)\cdot \mathbf{n}_K,
 \]

            where $\mathbf{n}_K$ denotes the outward unit normal to $K$. Then, summing over the elements $K\in\mathcal{T}_h$ and using that $\tau$ is smooth, we obtain

            \[
 \int_{\Omega} v_h \mbox{ div}(\mbox{div}(\tau)) = \int_{\Omega} D_h^2v_h:\tau - \sum_{e\in\mathcal{E}_h}\int_e\jump{\nabla_h v_h}\cdot \average{\tau}\mathbf{n}_e + \sum_{e\in\mathcal{E}_h}\int_e v_h \average{\mbox{div}(\tau)}\cdot \mathbf{n}_e
 \]

            which reveals the motivation for the definition of the two lifting operators: if $\tau$ were an admissible test function, then the right-hand side would be equal to $\int_{\Omega}H_h(v_h):\tau$ and we would have shown the desired (weak) convergence. Actually, if we add and subtract $\tau_h$, the Lagrange interpolant of $\tau$ in $[\mathbb{V}_h\cap H_0^1(\Omega)]^{d\times d}$, we can show that the right-hand side is indeed equal to $\int_{\Omega}H_h(v_h):\tau$ up to terms that tend to zero as $h\rightarrow 0$ under appropriate assumptions on $v_h$.

            It is worth mentioning that defining $H_h$ without the lifting operators $r_e$ and $b_e$ for $e\in\mathcal{E}_h^b$ would not affect the weak convergence property (the integrals over boundary faces are zero since $\tau$ is compactly supported in $\Omega$). However, they are included in $H_h$ to ensure that the solution of the discrete problem introduced in the next section satisfies the homogeneous Dirichlet boundary conditions in the limit $h\rightarrow 0$.

            LDG approximations

            The proposed LDG approximation of the bi-Laplacian problem reads: find $u_h\in\mathbb{V}_h$ such that

            \[
 \int_{\Omega}H_h(u_h):H_h(v_h)
 +\gamma_1\sum_{e\in\mathcal{E}_h}h_e^{-1}\int_e\jump{\nabla_h u_h}\cdot\jump{\nabla_h v_h}
 +\gamma_0\sum_{e\in\mathcal{E}_h}h_e^{-3}\int_e\jump{u_h}\jump{v_h}
 =\int_{\Omega}fv_h \qquad \forall \, v_h\in\mathbb{V}_h,
 \]

             for any choice of penalty parameters $\gamma_0,\gamma_1>0$. In other words, the stability of the method is ensured for any positive parameters. This is in contrast with interior penalty methods for which they need to be large enough. (See also the discussions about penalty parameters in the step-39, step-47, and step-74 programs.)

          3. If $\{v_h\}_{h>0}\subset \mathbb{V}_h$ is a sequence uniformly bounded in the $\|\cdot\|_{H_h^2(\Omega)}$ norm such that $v_h\rightarrow v$ in $L^2(\Omega)$ as $h\rightarrow 0$ for some $v\in H^2(\Omega)$, then the discrete Hessian $H_h(v_h)$ weakly converges to $D^2v$ in $[L^2(\Omega)]^{d\times d}$ as $h\rightarrow 0$. Note that the uniform boundedness assumption implies that the limit $v$ belongs to $H_0^2(\Omega)$.
          4. The use of a reconstructed operator simplifies the design of the numerical algorithm. In particular, no integration by parts is needed to derive the discrete problem. This strategy of replacing differential operators by appropriate discrete counterparts can be applied to nonlinear and more general problems, for instance variational problems without a readily accessible strong formulation. It has been used for instance in [BGNY2020] and [BGNY2021] in the context of large bending deformation of plates.
    As in step-47, we could consider $C^0$ finite element approximations by replacing FE_DGQ<dim> by FE_Q<dim> (and include the appropriate header file deal.II/fe/fe_q.h) in the program below. In this case, the jump of the basis functions across any interior face is zero, and thus $b_e\left(\jump{\varphi_i}\right)=\mathbf{0}$ for all $e\in\mathcal{E}_h^0$, and could be dropped to save computational time. While overkill for the bi-Laplacian problem, the flexibility of fully discontinuous methods combined with reconstructed differential operators is advantageous for nonlinear problems.
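
    A hedged sketch of the element swap just mentioned (the rest of the program would stay unchanged):

      #include <deal.II/fe/fe_dgq.h>
      #include <deal.II/fe/fe_q.h> // the header file mentioned above
     
      // Fully discontinuous elements: value and gradient jumps are both nonzero.
      dealii::FE_DGQ<2> fe_dg(2);
     
      // C^0-continuous elements: value jumps vanish on interior faces, so the
      // liftings b_e of the value jumps could be dropped there to save time.
      dealii::FE_Q<2> fe_cg(2);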

    Implementation

    As customary, we assemble the matrix $A$ and the right-hand side $\boldsymbol{F}$ by looping over the elements $K\in\mathcal{T}_h$. Since we are using discontinuous finite elements, the support of each $\varphi_i$ is only one element $K\in\mathcal{T}_h$. However, due to the lifting operators, the support of $H_h(\varphi_i)$ is $K$ plus all the neighbors of $K$ (recall that for $e\in \mathcal{E}_h$, the support of the lifting operators $r_e$ and $b_e$ is $\omega_e$). Therefore, when integrating over a cell $K_c$, we need to consider the interactions of the degrees of freedom of $K_c$ with themselves, with those of each neighbor of $K_c$, and of the degrees of freedom of one neighbor with those of another neighbor (case $d=2$).

    The last of these accounts for the fact that the lifted shape functions from one of the neighbor cells may overlap on $K_c$ with the lifted shape functions of another neighbor cell, as mentioned above. In other words, we need to compute the discrete Hessian of all the basis functions with support on $K_c$ as well as all the basis functions with support on the neighboring cells of $K_c$. This is done in the function compute_discrete_hessians. A cell $K_c$ can have fewer than four neighbors (six when $d=3$) when at least one face $e\subset\partial K_c$ is part of the boundary of the domain. It can also have more neighbors when hanging nodes are present. To simplify the presentation we do not discuss the latter.

    Due to the local support of the basis functions, many of the terms of the discrete Hessian are zero. For any basis function $\varphi^c$ with support on $K_c$ we have $r_e\left(\jump{\nabla_h\varphi^c}\right)\not\equiv 0$ only if $e\subset\partial K_c$, and similarly for $b_e\left(\jump{\varphi^c}\right)$. Therefore, the discrete Hessian of $\varphi^c$ reduces to

    \[
 H_h(\varphi^c)=D_h^2\varphi^c-\sum_{e\subset\partial K_c}r_e\left(\jump{\nabla_h \varphi^c}\right)+\sum_{e\subset\partial K_c}b_e\left(\jump{\varphi^c}\right).
 \]

    Evaluated at the quadrature points $x_q$ of $K_c$, these discrete Hessians are stored as

    \[
 {\rm compute\_discrete\_hessians[i][q]}, \qquad 0\leq {\rm i} < {\rm n\_dofs}, \,\, 0\leq {\rm q} < {\rm n\_q\_points},
 \]

    where n_dofs = fe_values.dofs_per_cell is the number of degrees of freedom per cell and n_q_points = quad.size() is the number of quadrature points on $K_c$. For any basis function $\varphi^n$ with support on a neighboring cell, the discrete Hessian $H_h(\varphi^n)$ evaluated on $K_c$ contains only the two lifting terms, but not the term involving $D^2_h\varphi^n$, since $\varphi^n|_{K_c}\equiv 0$. Moreover, only the lifting over the common face $e$ is nonzero on $K_c$, namely for all $x_q\in K_c$

    \[
 H_h(\varphi^n)(x_q)=-r_e\left(\jump{\nabla_h\varphi^n}\right)(x_q)+b_e\left(\jump{\varphi^n}\right)(x_q).
 \]

    To compute the liftings, we expand them in a basis $\{\psi_m\}$ of $[\mathbb{V}_h]^{d\times d}$ on the patch $\omega_e$ and solve a small local linear system involving the mass matrix $\boldsymbol{M}_c$; the right-hand side entries $G_m$ consist of face integrals of the kind given below. To evaluate them, note that for an interior face $e$ shared by $K_c$ and $K_n$ we have

    \[
 \mathbf{n}_e\jump{\varphi}=\mathbf{n}_{K_c}\varphi|_{K_c}+\mathbf{n}_{K_n}\varphi|_{K_n},
 \]

    where $\mathbf{n}_{K_c}$ (resp. $\mathbf{n}_{K_n}$) denotes the outward unit normal to $K_c$ (resp. $K_n$). Therefore, if $\varphi=\varphi^c$, namely $\varphi$ has support on the current cell $K_c$, then

    \[
 G_m=\int_e\average{{\rm div}\, \psi_m}\cdot\mathbf{n}_e\jump{\varphi^c}=\frac{1}{2}\int_e{\rm div}\, \psi_m\cdot\mathbf{n}_{K_c}\varphi^c,
 \]

    while if $\varphi=\varphi^n$, namely $\varphi$ has support on the neighboring cell $K_n$, then

    \[
 G_m=\int_e\average{{\rm div}\, \psi_m}\cdot\mathbf{n}_e\jump{\varphi^n}=\frac{1}{2}\int_e{\rm div}\, \psi_m\cdot\mathbf{n}_{K_n}\varphi^n.
 \]

    The factor $\frac{1}{2}$ comes from the average operator as $e$ is assumed to be an interior face.
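
    To make the local solve concrete, here is a minimal hedged sketch with hypothetical names (in the program, $\boldsymbol{M}_c$ is assembled by assemble_local_matrix() and the entries $G_m$ by face integrals as above):

      #include <deal.II/lac/full_matrix.h>
      #include <deal.II/lac/vector.h>
     
      // Sketch: the expansion coefficients c of a lifting with respect to a
      // local basis {psi_m} solve the small dense system M_c c = G, where M_c
      // is the local mass matrix and G collects the face integrals G_m.
      void solve_local_lifting(dealii::FullMatrix<double>    M_c,
                               const dealii::Vector<double> &G,
                               dealii::Vector<double>       &c)
      {
        M_c.gauss_jordan(); // invert the (small) local mass matrix in place
        M_c.vmult(c, G);    // c = M_c^{-1} G
      }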

    Test case

    The performance of the numerical algorithm will be assessed using a manufactured solution $u:(0,1)^d\rightarrow\mathbb{R}$ given by

    \[
 u(x,y)=x^2(1-x)^2y^2(1-y)^2
 \]

    if $d=2$, while if $d=3$ we take

    \[
 u(x,y,z)=x^2(1-x)^2y^2(1-y)^2z^2(1-z)^2.
 \]
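
    For illustration, the manufactured solution could be encoded as a dealii::Function; this is a hedged sketch, and the tutorial's own class may differ in details:

      #include <deal.II/base/function.h>
      #include <deal.II/base/point.h>
     
      // u(x) = prod_d x_d^2 (1-x_d)^2, valid for both d = 2 and d = 3.
      template <int dim>
      class ExactSolution : public dealii::Function<dim>
      {
      public:
        virtual double value(const dealii::Point<dim> &p,
                             const unsigned int /*component*/ = 0) const override
        {
          double u = 1.;
          for (unsigned int d = 0; d < dim; ++d)
            u *= p[d] * p[d] * (1. - p[d]) * (1. - p[d]);
          return u;
        }
      };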

    For different values of $h$, we will report the error $u-u_h$ measured in the discrete $H^2$ metric (defined above but extended to piecewise $H^2$ functions), the discrete $H^1$ metric

    \[
 \|v\|_{H_h^1(\Omega)}^2 \dealcoloneq \|\nabla_h v\|_{L^2(\Omega)}^2+\sum_{e\in\mathcal{E}_h}h_e^{-1}\|\jump{v}\|_{L^2(e)}^2, \quad v\in \prod_{K\in\mathcal{T}_h}H^1(K),
 \]

    as well as the $L^2$ metric.

    The commented program

    Include files

    All the include files have already been discussed in previous tutorials.

      void compute_errors();
      void output_results() const;
     
    As indicated by its name, the function assemble_local_matrix() is used for the assembly of the (local) mass matrix used to compute the two lifting terms (see the matrix $\boldsymbol{M}_c$ introduced in the introduction when describing the computation of $b_e$). The function compute_discrete_hessians() computes the required discrete Hessians: the discrete Hessians of the basis functions with support on the current cell (stored in the output variable discrete_hessians) and the basis functions with support on a neighbor of the current cell (stored in the output variable discrete_hessians_neigh). More precisely, discrete_hessians[i][q_point] stores $H_h(\varphi_i)(x_q)$, where $\varphi_i$ is a basis function with support on cell, while discrete_hessians_neigh[face_no][i][q_point] stores $H_h(\varphi_i)(x_q)$, where $\varphi_i$ is a basis function of the neighboring cell adjacent to the face face=cell->face(face_no).

      void assemble_local_matrix(const FEValues<dim> &fe_values_lift,
      const unsigned int n_q_points,
      FullMatrix<double> &local_matrix);
    Finally, the last two variables correspond to the penalty coefficients $\gamma_1$ and $\gamma_0$ for the jumps of $\nabla_h u_h$ and $u_h$, respectively.

      const double penalty_jump_grad;
      const double penalty_jump_val;
      };

    BiLaplacianLDGLift::compute_errors

    This function computes the discrete $H^2$, $H^1$ and $L^2$ norms of the error $u-u_h$, where $u$ is the exact solution and $u_h$ is the approximate solution. See the introduction for the definition of the norms.
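
    As a hedged sketch of the $L^2$ part only (the discrete $H^2$ and $H^1$ norms additionally involve face-jump terms that the function below accumulates by hand; dof_handler, solution, exact_solution, triangulation and fe are assumed to exist as in the surrounding program):

      #include <deal.II/base/quadrature_lib.h>
      #include <deal.II/lac/vector.h>
      #include <deal.II/numerics/vector_tools.h>
     
      // Cellwise L^2 differences, then the global L^2 error.
      dealii::Vector<double> difference_per_cell(triangulation.n_active_cells());
      dealii::VectorTools::integrate_difference(dof_handler,
                                                solution,
                                                exact_solution,
                                                difference_per_cell,
                                                dealii::QGauss<dim>(fe.degree + 2),
                                                dealii::VectorTools::L2_norm);
      const double L2_error =
        dealii::VectorTools::compute_global_error(triangulation,
                                                  difference_per_cell,
                                                  dealii::VectorTools::L2_norm);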

      template <int dim>
      void BiLaplacianLDGLift<dim>::compute_errors()
      {
     

    BiLaplacianLDGLift::compute_discrete_hessians

    This function is the main novelty of this program. It computes the discrete Hessian $H_h(\varphi)$ for all the basis functions $\varphi$ of $\mathbb{V}_h$ supported on the current cell and those supported on a neighboring cell. The first argument indicates the current cell (referring to the global DoFHandler object), while the other two arguments are output variables that are filled by this function.

    In the following, we need to evaluate finite element shape functions for the fe_lift finite element on the current cell. Like for example in step-61, this "lift" space is defined on every cell individually; as a consequence, there is no global DoFHandler associated with this because we simply have no need for such a DoFHandler. That leaves the question of what we should initialize the FEValues and FEFaceValues objects with when we ask them to evaluate shape functions of fe_lift on a concrete cell. If we simply provide the first argument to this function, cell, to FEValues::reinit(), we will receive an error message that the given cell belongs to a DoFHandler that has a different finite element associated with it than the fe_lift object we want to evaluate. Fortunately, there is a relatively easy solution: We can call FEValues::reinit() with a cell that points into a triangulation – the same cell, but not associated with a DoFHandler, and consequently no finite element space. In that case, FEValues::reinit() will skip the check that would otherwise lead to an error message. All we have to do is to convert the DoFHandler cell iterator into a Triangulation cell iterator; see the first couple of lines of the function below to see how this can be done.
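
    A minimal sketch of this conversion (variable names assumed):

      // Convert the DoFHandler cell iterator into a plain Triangulation cell
      // iterator; an FEValues object built with fe_lift can then be
      // reinitialized on the same geometric cell without triggering the
      // finite-element consistency check.
      const typename dealii::Triangulation<dim>::cell_iterator cell_lift(
        &cell->get_triangulation(), cell->level(), cell->index());
      fe_values_lift.reinit(cell_lift);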

      template <int dim>
      void BiLaplacianLDGLift<dim>::compute_discrete_hessians(
     
      const bool at_boundary = face->at_boundary();
     
    Recall that by convention, the average of a function across a boundary face $e$ reduces to the trace of the function on the only element adjacent to $e$, namely there is no factor $\frac{1}{2}$. We distinguish between the two cases (the current face lies in the interior or on the boundary of the domain) using the variable factor_avg.

      factor_avg = 0.5;
      if (at_boundary)
      {
    (last row of the convergence table)
    cycle  n_cells  n_dofs  $H^2$ error  rate  $H^1$ error  rate  $L^2$ error  rate
    6      4096     36864   1.785e-03    1.02  7.850e-06    1.95  1.277e-06    1.91

    This matches the expected optimal convergence rates for the $H^2$ and $H^1$ norms, but is sub-optimal for the $L^2$ norm. Incidentally, this also matches the results seen in step-47 when using polynomial degree $k=2$.

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_83.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_83.html 2024-11-15 06:44:33.459707572 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_83.html 2024-11-15 06:44:33.459707572 +0000 @@ -317,7 +317,7 @@

    Intuitively, one might simply want to use the same idea as we used here except that we let every MPI process serialize its own data, and read its own data. This works, but there are some drawbacks:

    In order to address these issues, in particular the last one, the right approach is to deviate a bit from the simple scheme of having a serialize() function that simply serializes/deserializes everything into an archive, and then have two functions checkpoint() and restart() that for all practical purposes defer all the work to the serialize() function. Instead, one splits all data into two categories:

    Of course, the analytical solution, and thus also the error, is only defined in $\overline{\Omega}$. Thus, to compute the $L^2$-error we must proceed in the same way as when we assembled the linear system. We first create a NonMatching::FEValues object.

      template <int dim>
      double LaplaceSolver<dim>::compute_L2_error() const
      {

    A Convergence Study

    Finally, we do a convergence study to check that the $L^2$-error decreases with the expected rate. We refine the background mesh a few times. In each refinement cycle, we solve the problem, compute the error, and add the $L^2$-error and the mesh size to a ConvergenceTable.

      template <int dim>
      void LaplaceSolver<dim>::run()
      {
      }
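
    A hedged sketch of the ConvergenceTable bookkeeping described above (variable names assumed):

      #include <deal.II/base/convergence_table.h>
      #include <iostream>
     
      dealii::ConvergenceTable convergence_table;
     
      // Inside each refinement cycle:
      convergence_table.add_value("cells", triangulation.n_active_cells());
      convergence_table.add_value("mesh size", mesh_size);
      convergence_table.add_value("L2-error", l2_error);
     
      // After the loop: compute the estimated order of convergence (EOC).
      convergence_table.evaluate_convergence_rates(
        "L2-error", dealii::ConvergenceTable::reduction_rate_log2);
      convergence_table.write_text(std::cout);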

    Results

    The numerical solution for one of the refinements is shown in the figure below. The zero-contour of the level set function is shown as a white line. On the intersected cells, we see that the numerical solution also has values outside $\overline{\Omega}$. As mentioned earlier, this extension of the solution is artificial.

    The results of the convergence study are shown in the table below. We see that the $L^2$ error decreases as we refine and that the estimated order of convergence, EOC, is close to 2.

    Cycle Mesh size $L^2$-Error EOC
    0 0.3025 8.0657e-02 -

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_86.html differs (HTML document, UTF-8 Unicode text, with very long lines)

    where $g_h$ is an interpolant of the function $g$ on the boundary.

    This equation can be rewritten in matrix form in the usual way, by expanding $u_h$ into its coefficients times shape function form, pulling the sum over $j$ out of the integrals, and then considering that choosing test function $\varphi_i$ leads to the $i$th row of the linear system. This then gives us

            \begin{align*}
   M
   \frac{\partial U(t)}{\partial t}
   =
   -AU(t)
   +
   F(t),
 \end{align*}

    which matches the form stated in the documentation of SUNDIALS::ARKode. In particular, ARKode is able to deal with the fact that the time derivative is multiplied by the mass matrix $M$, which is always there when using finite elements.

    On the other hand, when using the PETScWrappers::TimeStepper class, we can solve ODEs that are stated in a general "implicit" form, and in that case we simply bring everything to the left hand side and obtain

            \begin{align*}
   \underbrace{
   M \frac{\partial U(t)}{\partial t} + AU(t) - F(t)
   }_{=: R(t,U,\dot U)}
   =
   0.
 \end{align*}

    This matches the form $R(t,U,\dot U) = 0$ you can find in the documentation of PETScWrappers::TimeStepper if you identify the time dependent function $y=y(t)$ used there with our solution vector $U(t)$; we write $R(t,U,\dot U)$ instead of the $F(t,y,\dot y)$ used there because we want to reserve $F$ for the right hand side vector of the ODE that indicates forcing terms.

    This program uses the PETScWrappers::TimeStepper class, and so we will take the latter viewpoint. (It is worth noting that SUNDIALS also has a package that can solve ODEs in implicit form, wrapped by the SUNDIALS::IDA class.) In what follows, we will continue to use $U(t)$ as the function we seek, even though the documentation of the class uses $y(t)$.

    Mapping the differential equation formulation to the time stepper

    Having identified how we want to see the problem (namely, as an "implicit" ODE), the question is how we describe the problem to the time stepper. Conceptually, all of the wrappers for time stepping packages we support in deal.II only require us to provide them with a very limited set of operations. Specifically, for the implicit formulation used by PETScWrappers::TimeStepper, all we need to implement are functions that provide the following: the evaluation of the residual $R(t,U,\dot U)$; the setup of the Jacobian matrix $J_\alpha$ discussed below; and the solution of a linear system with this Jacobian.

    That's really it. If we can provide these three functions, PETSc will do the rest (as would, for example, SUNDIALS::ARKode or, if you prefer the implicit form, SUNDIALS::IDA). It will not be very difficult to set these things up. In practice, the way this will work is that inside the run() function, we will set up lambda functions that can access the information of the surrounding scopes and that return the requested information.
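
    Schematically, and with the caveat that the exact member names and signatures should be checked against the PETScWrappers::TimeStepper documentation, this might look like the following; the lambda bodies are placeholders:

      // Schematic sketch only; member names are assumptions based on the
      // description above, and time_stepper_data/solution come from context.
      using VectorType = dealii::PETScWrappers::MPI::Vector;
     
      dealii::PETScWrappers::TimeStepper<VectorType> petsc_ts(time_stepper_data);
     
      petsc_ts.implicit_function = [&](const double      t,
                                       const VectorType &y,
                                       const VectorType &y_dot,
                                       VectorType       &res) {
        // evaluate res = R(t, y, y_dot)
      };
     
      petsc_ts.setup_jacobian = [&](const double      t,
                                    const VectorType &y,
                                    const VectorType &y_dot,
                                    const double      alpha) {
        // assemble J_alpha = alpha M + A
      };
     
      petsc_ts.solve_with_jacobian = [&](const VectorType &src, VectorType &dst) {
        // solve J_alpha dst = src
      };
     
      petsc_ts.solve(solution); // run the time integration loop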

    The concrete problem this program solves is the heat equation,

    \begin{align*}
   \frac{\partial u}{\partial t} - \Delta u &= f
   &&\forall \mathbf x \in \Omega, t \in (0,T), \\
   u(\mathbf x, 0) &= u_0(\mathbf x)
   &&\forall \mathbf x \in \Omega, \\
   u(\mathbf x, t) &= g(\mathbf x, t)
   &&\forall \mathbf x \in \partial\Omega, t \in (0,T).
 \end{align*}

    The right hand side $f$, initial conditions $u_0$, and Dirichlet boundary values $g$ are all specified in an input file heat_equation.prm in which these functions are provided as expressions that are parsed and evaluated at run time using the Functions::ParsedFunction<dim> class. The version of this file that is distributed with the library uses

            \begin{align*}
   f(\mathbf x,t) &= 0, \\
   u_0(\mathbf x) &= 0,
 \end{align*}

    along with a time-dependent expression for the boundary values $g$ that we do not reproduce here.

    The first piece we then need to provide to the time stepper is the residual

    \[
    R(t,U,\dot U) = M \frac{\partial U(t)}{\partial t} + AU(t) - F(t).
    \]
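
    Purely for illustration (the tutorial deliberately avoids assembling $M$ and $A$, as discussed next), with assembled matrices this residual would be computed as:

      #include <deal.II/lac/sparse_matrix.h>
      #include <deal.II/lac/vector.h>
     
      // Illustration only: R = M U_dot + A U - F with assembled matrices.
      void residual(const dealii::SparseMatrix<double> &M,
                    const dealii::SparseMatrix<double> &A,
                    const dealii::Vector<double>       &U,
                    const dealii::Vector<double>       &U_dot,
                    const dealii::Vector<double>       &F,
                    dealii::Vector<double>             &R)
      {
        M.vmult(R, U_dot); // R  = M U_dot
        A.vmult_add(R, U); // R += A U
        R -= F;            // R -= F
      }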

    We could do that by actually forming the matrices $M$ and $A$, but this is not efficient. Instead, recall (by writing out how the elements of $M$ and $A$ are defined, and exchanging integrals and sums) that the $i$th element of the residual vector is given by

    \begin{align*}
    R(t,U,\dot U)_i
    &= \sum_j \int_\Omega \varphi_i(\mathbf x, t) \varphi_j(\mathbf x, t) \,\dot U_j(t)
    + \sum_j \int_\Omega \nabla\varphi_i(\mathbf x, t) \cdot \nabla\varphi_j(\mathbf x, t) \, U_j(t)
    - \int_\Omega \varphi_i(\mathbf x, t) f(\mathbf x, t).
 \end{align*}

    The second piece is the Jacobian the time stepper asks us for, a matrix that here takes the form

    \[
    J_\alpha = A + \alpha M
    \]
    and which is in particular independent of time and the current solution vectors $y$ and $\dot y$.

    Having seen the assembly of matrices before, there is little that should surprise you in the actual assembly here:

      template <int dim>
      void HeatEquation<dim>::assemble_implicit_jacobian(
    When it comes to solving linear systems with the Jacobian, recall that this matrix has the form

    \[
    J_\alpha = \alpha M + A
    \]

    where $M$ is a mass matrix and $A$ a Laplace matrix. $M$ is symmetric and positive definite; $A$ is symmetric and at least positive semidefinite; $\alpha> 0$. As a consequence, the Jacobian matrix is a symmetric and positive definite matrix, which we can efficiently solve with the Conjugate Gradient method, along with either SSOR or (if available) the algebraic multigrid implementation provided by PETSc (via the Hypre package) as preconditioner. In practice, if you wanted to solve "real" problems, one would spend some time finding which preconditioner is optimal, perhaps using PETSc's ability to read solver and preconditioner choices from the command line. But this is not the focus of this tutorial program, and so we just go with the following:

      template <int dim>
      void
      HeatEquation<dim>::solve_with_jacobian(const PETScWrappers::MPI::Vector &src,
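
    The body of such a function might, in a native deal.II setting, look like the following hedged sketch with CG and SSOR (the tutorial's actual implementation works with the PETSc wrapper classes instead):

      #include <deal.II/lac/precondition.h>
      #include <deal.II/lac/solver_cg.h>
      #include <deal.II/lac/solver_control.h>
      #include <deal.II/lac/sparse_matrix.h>
      #include <deal.II/lac/vector.h>
     
      // Solve J_alpha x = b with CG preconditioned by SSOR; J_alpha is
      // symmetric positive definite as argued above.
      void solve_with_jacobian_sketch(const dealii::SparseMatrix<double> &J_alpha,
                                      dealii::Vector<double>             &x,
                                      const dealii::Vector<double>       &b)
      {
        dealii::SolverControl control(1000, 1e-8 * b.l2_norm());
        dealii::SolverCG<dealii::Vector<double>> cg(control);
     
        dealii::PreconditionSSOR<dealii::SparseMatrix<double>> preconditioner;
        preconditioner.initialize(
          J_alpha,
          dealii::PreconditionSSOR<dealii::SparseMatrix<double>>::AdditionalData(
            1.2)); // relaxation parameter
     
        cg.solve(J_alpha, x, b, preconditioner);
      }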
    | solve with Jacobian | 593 | 1.97s | 1.7% |
    | update current constraints | 509 | 4.53s | 3.9% |
    +---------------------------------+-----------+------------+------------+
    What is happening here is that apparently PETSc TS is not happy with our choice of initial time step size, and after several linear solves has reduced it to the minimum we allow it to, 0.01. The following time steps then run at a time step size of 0.01 until it decides to make it slightly larger again and (apparently) switches to a higher order method that requires more linear solves per time step but allows for a larger time step closer to our initial choice 0.025 again. It does not quite hit the final time of $T=5$ with its time step choices, but we've got only ourselves to blame for that by setting

    set match final time = false

    in the input file.

    Not all combinations of methods, time step adaptation algorithms, and other parameters are valid, but the main messages from the experiment above that you should take away are:

    evaluate the values and gradients of a solution defined by DoFHandler and a vector at the requested points. Internally, a lambda function is passed to Utilities::MPI::RemotePointEvaluation. Additionally it handles the special case if points belong to multiple cells by taking, e.g., the average, the minimum, or the maximum via an optional argument of type EvaluationFlags::EvaluationFlags. This occurs when a point lies on a cell boundary or within a small tolerance around it and might be relevant for discontinuous solution quantities, such as values of discontinuous Galerkin methods or gradients in continuous finite element methods.

    Motivation: two-phase flow

    The minimal code examples (short "mini examples") presented in this tutorial are motivated by the application of two-phase-flow simulations formulated in a one-fluid setting using an Eulerian framework. In diffuse interface methods, the two phases may be implicitly described by a level-set function, here chosen as a signed distance function $\phi(\boldsymbol{x})$ in $\Omega$ and illustrated for a popular benchmark case of a rising bubble in the following figure.

    \[
 \boldsymbol{x}^{i+1} = \boldsymbol{x}^{i} + \Delta t \, \boldsymbol{u}(\boldsymbol{x}^{i}) \qquad \forall \boldsymbol{x}
 \in \Gamma,
 \]

    which considers an explicit Euler time integration scheme from time step $i$ to $i+1$ with time step-size $\Delta t$.

    For a two-phase-flow model considering the incompressible Navier-Stokes equations, the two phases are usually coupled by a singular surface-tension force $\boldsymbol{F}_S$, which results, together with the difference in fluid properties, in discontinuities across the interface:

    \[
         \boldsymbol{F}_S(\boldsymbol{x})= \sigma \kappa(\boldsymbol{x})
   \boldsymbol{n}(\boldsymbol{x}) \delta_{\Gamma}(\boldsymbol{x}).
 \]

    Here $\sigma$ represents the surface-tension coefficient, $\boldsymbol{n}(\boldsymbol{x})$ the interface normal vector and $\kappa(\boldsymbol{x})$ the interface mean curvature field. The singularity at the interface is imposed by the Dirac delta function

    \[
 \delta_{\Gamma}(\boldsymbol{x}) = \begin{cases}
 1 & \text{on } \Gamma \\
 0 & \text{elsewhere}.
 \end{cases}
 \]

    In the weak form, the surface-tension force then reduces to an integral over the interface,

    \[
 (\boldsymbol v, \boldsymbol{F}_S)_{\Omega} = \left(\boldsymbol v, \sigma
   \kappa \boldsymbol{n} \right)_\Gamma,
 \]

    exploiting the property of the Dirac delta function for any smooth function $v$, i.e., $\int_\Omega\delta_{\Gamma}\,v\,\text{d}x=\int_\Gamma v\,\text{d}y$. For front-tracking methods, the curvature and the normal vector are directly computed from the surface mesh.

    Alternatively, in regularized surface-tension-force models [brackbill1992continuum] [olsson2005conservative] [kronbichler2018fast], the Dirac delta function is approximated by a smooth ansatz

    \[
 (\boldsymbol v, \boldsymbol F_S)_{\Omega} \approx \left(\boldsymbol v, \sigma
   \kappa \boldsymbol{n} \, \delta_{\Gamma}\right)_{\Omega},
 \]

    where $\delta_{\Gamma}$ is now a smooth approximation of the Dirac delta function.

    We determine a finite element solution that implicitly represents the geometry of a sphere with radius $r=0.25$ and center $(0.5,0.5)$ via a signed distance function.

      signed_distance.reinit(dof_handler.locally_owned_dofs(),
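
    For context, a hedged sketch of how such a signed distance field might be filled, assuming dof_handler and the vector signed_distance exist as in the surrounding program (deal.II provides Functions::SignedDistance::Sphere for this purpose):

      #include <deal.II/base/function_signed_distance.h>
      #include <deal.II/numerics/vector_tools.h>
     
      // Interpolate the signed distance function of a sphere with radius 0.25
      // centered at (0.5, 0.5) onto the finite element space.
      const dealii::Point<2> center(0.5, 0.5);
      const double           radius = 0.25;
      const dealii::Functions::SignedDistance::Sphere<2> sphere(center, radius);
     
      dealii::VectorTools::interpolate(dof_handler, sphere, signed_distance);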
    /usr/share/doc/packages/dealii/doxygen/deal.II/step_89.html differs (HTML document, UTF-8 Unicode text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_89.html 2024-11-15 06:44:33.735710037 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_89.html 2024-11-15 06:44:33.735710037 +0000 @@ -167,34 +167,34 @@

    This tutorial presents one way to apply non-matching and/or Chimera methods within matrix-free loops in deal.II. We are following [heinz2022high] to show that in some cases a simple point-to-point interpolation is not sufficient. As a remedy, Nitsche-type mortaring is used to suppress artificial modes observed for the acoustic conservation equations [heinz2022high].

    Acoustic conservation equations

    Acoustic conservation equations are used to describe linear wave propagation. The set of equations consists of the conservation of mass and momentum

    \[
   \frac{\partial \, p}{\partial \, t} + \rho c^2 \nabla\cdot \mathbf{u} = 0,\\
   \frac{\partial \, \mathbf{u}}{\partial \, t} + \frac{1}{\rho}\nabla p = \mathbf{0}.
 \]

    Here, $p$ is the acoustic pressure, $\mathbf{u}$ the acoustic particle velocity, $c$ the speed of sound, and $\rho$ the mean density of the fluid in which waves are propagating. As stated above, the two equations are simply a different way of writing the wave equation: If you take the time derivative of the first equation, and the divergence of the second, i.e., compute

    \[
   \frac{\partial^2 \, p}{\partial \, t^2} + \rho c^2 \nabla\cdot
      \frac{\partial \mathbf{u}}{\partial t} = 0,\\
   \frac{\partial \, \nabla \cdot \mathbf{u}}{\partial \, t} +
      \nabla \cdot \frac{1}{\rho}\nabla p = 0,
 \]

    then you can substitute the second equation into the first one to obtain

    \[
   \frac{\partial^2 \, p}{\partial \, t^2} - \rho c^2 \nabla \cdot
      \frac{1}{\rho}\nabla p = 0,
 \]

    which in the case of constant density $\rho$ results in the more familiar form of the wave equation that we have previously solved in step-23:

    \[
   \frac{\partial^2 \, p}{\partial \, t^2} - c^2 \Delta p = 0.
 \]

    The reason one may want to consider the form we use here (rather than the form used in step-23) is that it has the form of a hyperbolic conservation law in which only first temporal and spatial derivatives appear. Whereas both the more familiar, second order form of the wave equation and the formulation as a first-order system conserve energy, it is often easier to devise numerical schemes that have the right amount of dissipation (necessary for numerical stability) using the well-developed machinery available for first-order systems.

    For the discretization of this form, we make use of discontinuous Galerkin (DG) methods. DG methods are especially attractive for the acoustic conservation equations due to their low numerical dispersion errors. More importantly for this tutorial, DG methods natively extend to non-matching Nitsche-type methods [arnold2002unified]. That is, numerical fluxes are not only used on interior element faces but also as non-matching coupling conditions.

    The discretized equations read

    \[
   \int_{K} q_h\frac{\partial \, p_h}{\partial \, t} +
      \int_{K} q_h \rho c^2 \nabla \cdot\mathbf{u}_h +
         \int_{\partial K} q_h\mathbf{n}\cdot\rho c^2 (\mathbf{u}^*_h-\mathbf{u}_h)
            = 0,\\
   \int_{K} \mathbf{w}_h\cdot\frac{\partial \, \mathbf{u}_h}{\partial \, t} +
      \int_{K} \mathbf{w}_h\cdot \frac{1}{\rho} \nabla p_h +
         \int_{\partial K} \mathbf{w}_h \cdot\mathbf{n} \frac{1}{\rho}(p^*_h-p_h)
            = \mathbf{0},
 \]

    where $\mathbf{w}_h$ and $q_h$ are test functions. The numerical fluxes are defined as follows [hochbruck2014efficient]:

    \[
   p_h^*=p_h-\frac{\tau^-}{\tau^-+\tau^+}[p_h] +
      \frac{\tau^-\tau^+}{\tau^-+\tau^+}\jump{\mathbf{u}_h},\\
   \mathbf{u}_h^*=\mathbf{u}_h-\frac{\gamma^-}{\gamma^-+\gamma^+}[\mathbf{u}_h]
      +\frac{\gamma^-\gamma^+}{\gamma^-+\gamma^+}\jump{p_h},
 \]

    with the penalty parameters $\tau=\frac{\rho c}{2}$ and $\gamma=\frac{1}{2\rho c}$. In these formulas, $[a] = a^- - a^+ $ denotes the jump of an arbitrary quantity $a$ over element faces (face between elements $K^-$ and $K^+$) and $\jump{a} = a^- \mathbf{n}^- + a^+ \mathbf{n}^+$. For homogeneous materials, the fluxes reduce to standard Lax–Friedrichs fluxes ($\gamma^-=\gamma^+$ and $\tau^-=\tau^+$)

    \[
   p_h^*=\average{p_h}+\tau\jump{\mathbf{u}_h},\\
   \mathbf{u}_h^*=\average{\mathbf{u}_h}+\gamma\jump{p_h}.
 \]

    The expression $\average{a}=\frac{a^- + a^+}{2}$ denotes the averaging operator.

    Non-matching discretizations

    Non-matching discretizations can be used to connect mesh regions with different element sizes without the need for a transition region. Therefore, they are highly desirable in multiphysics applications. One example is a plate that radiates sound. The plate needs a much finer discretization than the surrounding air because – due to the vastly different speeds of sound in the two media – the wavelength of sound of the same frequency is very different in the two media, and the mesh size needs to be proportional to the wavelength. We will simulate this example later on.

    A different example of the usefulness of non-matching grids is where one wants to move the mesh in parts of the domain, but not others. A typical example is the simulation of windmills: One might want to enclose the rotating wings into a co-rotating mesh (to avoid having to remesh with every time step) but of course the mesh that describes the air above the surrounding landscape and around the tower on which the windmill is located should not rotate. In a case like this, one considers sliding rotating interfaces [duerrwaechter2021an] between the co-rotating part of the mesh and the stationary part of the mesh, but this also requires the ability to handle non-matching discretizations.

    Point-to-point interpolation is a naive approach. Whenever you need to compute integrals over the boundary of the cell at the left, for a coupled problem you then need to evaluate the solution or shape functions on the right at quadrature points of the face on the left, i.e., of the face of element $K^-$. You can just evaluate these by interpolating the information on the right at these points, but this is in general expensive (read, for example, the documentation of VectorTools::point_value(), which implements this kind of functionality). As can be seen from the picture this approach might be subject to aliasing in some cases.

    Nitsche-type mortaring

    Mortaring is the process of computing intersections and is not related to the Mortar method which enforces the coupling via Lagrange multipliers. Instead, in mortaring methods one refers to obtained intersections as "mortars". On each mortar a new integration rule is defined. The integral of the face of element $K^-$ is computed on the intersections. The idea is that if we want to integrate something over a face $f\subset \partial K^-$, that we break that integral into pieces:

    \[
   \int_f \cdots dx = \sum_i \int_{f_i} \cdots dx
 \]

    where each of the $f_i$ corresponds to the intersection of the original face $f$ on the left with exactly one of the faces on the right; or, if we had multiple faces on the left, then the $f_i$ would be the intersections of exactly one face on the left and one face on the right.

    The point of this approach is first, that splitting the integral this way is exact. Secondly, and maybe more importantly, the terms we are integrating (the dots in the formula above) are now defined on one cell on each side, and consequently are smooth (whereas a finite element solution considered across multiple cells is, in general, not smooth). As a consequence, if we approximate the integrals via numerical integration (quadrature), then the result is exact as long as a sufficient number of integration points is used (at least for affine element shapes; for general curved elements, the integrand will contain rational expressions that are difficult to integrate exactly).

    In this tutorial, the intersections are computed using CGAL, the Computational Geometry Algorithms Library. Therefore, deal.II has to be configured with DEAL_II_WITH_CGAL for the Nitsche-type mortaring implementation. See the deal.II Readme file for information about installation.

    FERemoteEvaluation

    In practice, for integrals as those mentioned above, we need to evaluate solutions (and shape functions) from cells across the non-matching interface. This is awkward enough if the other side is on the same processor, but outright difficult if the cells on the other side of the interface are owned by a different process in a parallel computation.

    On regular meshes (say, doing things as we do in step-40), this is addressed by making sure that we are only computing integrals on locally owned cells and keeping around one layer of ghost cells for which we can query information. Ghost cells are the neighbors of locally owned cells, but in cases like the picture above, where the meshes are not matching, the cells on the other side of the interface are not neighbors in the logical sense – though they happen to be geometrically located adjacently. As a consequence, we need to find a way to efficiently query information on cells that are perhaps located on a different process.

    FERemoteEvaluation is a wrapper class which provides a similar interface to, e.g., the FEEvaluation and FEFaceEvaluation classes to access values over non-matching interfaces in matrix-free loops. A detailed description on how to set up the class and how to use it in actual code is given below using hands-on examples. Within this tutorial we only show the usage for non-matching discretizations. Note however, that FERemoteEvaluation can also be used in other settings such as volume coupling. Under the hood, Utilities::MPI::RemotePointEvaluation is used to query the solution or gradients at quadrature points. A detailed description how this is done can be found in step-87. The main difference in the usage of FERemoteEvaluation compared to FEEvaluation is that the interpolated values/gradients of the finite element solution at all the quadrature points are precomputed globally before the loop over the cells/faces of the mesh (i.e., near the place where the communication takes place) instead of performing the interpolation on a cell-by-cell basis. (The principal reason for this design is that MPI has a communication model where you can send messages, but you won't hear back unless the other side is actually listening. As a consequence, you can't generally write code where each process is doing its thing until it needs some information at which point it sends a message to another process to ask for something; because the other process doesn't know that there are such messages, or how many, that have been sent to it, it doesn't respond and so the first process is stuck. Instead, the programming model used with MPI is generally to collect information about everything one will need up front; then each process sends it to all the others; then each process works on these combined requests and sends the required information back to the senders; and at this point everyone has everything they need for their work and can actually do that work.)

    In DG methods we have to evaluate fluxes over element faces. Exemplarily, for an upwind-like flux $u^*(\mathbf{x}) = u^+(\mathbf{x})$ over element face $\partial K$ we have to compute

    \[
   F^{\partial K} = \left(\varphi^-, u^+\right)_{\partial K} \approx
      \sum_q \varphi^-(\mathbf{x}_q^{\partial K})\ u^+(\mathbf{x}_q^{\partial K})\
         w_q^{\partial K} |J_q|^{\partial K}.
-\] +\]" src="form_7825.png"/>

    The standard code to evaluate such fluxes via FEEvaluation on interior faces between two cells reads as follows (where _m corresponds to $K^{-}$, the current cell in minus normal direction, and _p corresponds to $K^{+}$, the neighbor cell in plus normal direction). FEFaceEvaluation::gather_evaluate(src, EvaluationFlags::values) and FEFaceEvaluation::get_value(q) extract the value at quadrature point $\mathbf{x}_q^{\partial K}$ from src. FEFaceEvaluation::submit_value(value, q) multiplies the value by the quadrature weight and the Jacobian determinant at $\mathbf{x}_q^{\partial K}$. Eventually, FEFaceEvaluation::integrate_scatter(EvaluationFlags::values, dst) multiplies the values queued for evaluation by FEFaceEvaluation::submit_value() by the value of the basis functions and writes the result to dst. The corresponding code reads

    const auto face_function =
      [&](const MatrixFree &data, VectorType &dst, const VectorType &src,
          const std::pair<unsigned int, unsigned int> face_range) {
        // ... loop over the face batches in face_range, see the sketch below ...
      };
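
    The body of the lambda was elided in the diff above; a minimal sketch of what such a loop body typically contains (template arguments of FEFaceEvaluation are omitted for brevity, and the flux is the upwind-like $u^*=u^+$ from above) could look like:

        // Inside face_function: evaluate u^+ from the neighbor cell and test
        // it against the basis functions phi^- of the current cell K^-.
        FEFaceEvaluation phi_m(data, /*is_interior_face=*/true);  // K^-
        FEFaceEvaluation phi_p(data, /*is_interior_face=*/false); // K^+
        for (unsigned int face = face_range.first; face < face_range.second; ++face)
          {
            phi_m.reinit(face);
            phi_p.reinit(face);
            phi_p.gather_evaluate(src, EvaluationFlags::values); // read u^+ from src
            for (unsigned int q = 0; q < phi_m.n_q_points; ++q)
              phi_m.submit_value(phi_p.get_value(q), q); // u^+ times w_q |J_q|
            phi_m.integrate_scatter(EvaluationFlags::values, dst); // times phi^-
          }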

    The object remote_communicator is of type FERemoteEvaluationCommunicator and is assumed to be correctly initialized prior to the above code snippet. FERemoteEvaluationCommunicator internally manages the update of ghost values over non-matching interfaces and keeps track of the mapping between quadrature point index and corresponding values/gradients. As mentioned above, the update of the values/gradients happens before the actual matrix-free loop. FERemoteEvaluationCommunicator, as well as FERemoteEvaluation, behaves differently for the given template parameter value_type. If we want to access values at arbitrary points (e.g. in combination with FEPointEvaluation), then we need to choose value_type=Number. If the values are defined at quadrature points of an FEEvaluation object, it is possible to get the values at the quadrature points of batches, and we need to choose value_type=VectorizedArray<Number>.
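
    Put together, the usage pattern described here looks roughly as follows. This is a sketch only: the constructor arguments, template parameter list, and the accessor names below are abbreviated assumptions, not verbatim API:

      // Set up remote evaluation once; remote_communicator is assumed to be
      // initialized as described above.
      FERemoteEvaluation<dim, 1, remote_value_type> u_r(remote_communicator,
                                                        dof_handler);

      // Communicate and cache all remote values *before* the matrix-free loop.
      u_r.gather_evaluate(src, EvaluationFlags::values);

      // Inside the face loop the cached values are then read back, e.g.:
      //   auto u_r_accessor = u_r.get_data_accessor(); // assumed accessor name
      //   u_r_accessor.reinit(face);
      //   ... u_r_accessor.get_value(q) ...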

    Overview of the test case

    In this program, we implemented both the point-to-point interpolation and Nitsche-type mortaring mentioned in the introduction.

    At first we are considering the test case of a vibrating membrane, see e.g. [nguyen2011high]. Standing waves of length $\lambda=2/M$ are oscillating with a time period of $T=2 / (M \sqrt{d} c)$, where $d$ is the dimension of the space in which our domain is located and $M$ is the number of modes per meter, i.e. the number of half-waves per meter. The corresponding analytical solution reads as

\begin{align*}
   p &=\cos(M \sqrt{d} \pi c t)\prod_{i=1}^{d} \sin(M \pi x_i),\\
   u_i&=-\frac{\sin(M \sqrt{d} \pi c t)}{\sqrt{d}\rho c}
      \cos(M \pi x_i)\prod_{j=1,j\neq i}^{d} \sin(M \pi x_j).
\end{align*}
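
    For instance, the pressure component of this solution can be coded as a time-dependent deal.II Function. The class below is a hypothetical helper for illustration (its name, members, and the way $M$ and $c$ are passed are our own choices, not the program's):

      #include <deal.II/base/function.h>
      #include <deal.II/base/numbers.h>
      #include <cmath>

      template <int dim>
      class MembranePressure : public Function<dim> // hypothetical helper
      {
      public:
        MembranePressure(const double M, const double c) : M(M), c(c) {}

        virtual double value(const Point<dim> &x,
                             const unsigned int /*component*/ = 0) const override
        {
          const double t = this->get_time();
          // p = cos(M sqrt(d) pi c t) * prod_i sin(M pi x_i)
          double p = std::cos(M * std::sqrt(1.0 * dim) * numbers::PI * c * t);
          for (unsigned int d = 0; d < dim; ++d)
            p *= std::sin(M * numbers::PI * x[d]);
          return p;
        }

      private:
        const double M, c;
      };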

    For simplicity, we are using homogeneous pressure Dirichlet boundary conditions within this tutorial. To be able to do so we have to tailor the domain size as well as the number of modes to conform with the homogeneous pressure Dirichlet boundary conditions. Within this tutorial we are using $M=10$ and a domain $\Omega=(0,1)^2$. The domain will be meshed so that the left and right parts of the domain consist of separate meshes that do not match at the interface.

    As will become clear from the results, the point-to-point interpolation will result in aliasing, which can be resolved using Nitsche-type mortaring.

    In a more realistic second example, we apply this implementation to a test case in which a wave is propagating from one fluid into another fluid. The speed of sound in the left part of the domain is $c=1$ and in the right part it is $c=3$. Since the wavelength is directly proportional to the speed of sound, three times larger elements can be used in the right part of the domain to resolve waves up to the same frequency. A test case like this has been simulated with a different domain and different initial conditions, e.g., in [bangerth2010adaptive].

    The commented program

    Include files

    The program starts with including all the relevant header files.

     

    Boundary conditions

    To be able to use the same kernel for all face integrals, we define a class that returns the needed values at boundaries. In this tutorial, homogeneous pressure Dirichlet boundary conditions are applied via the mirror principle, i.e. $p_h^+=-p_h^- + 2g$ with $g=0$.

      template <int dim, typename Number>
      class BCEvaluationP
      {
        // ...
      };
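
    The body elided above essentially implements the mirror principle; a sketch of its central method (pressure_m is an assumed member referencing the interior-face integrator, and the value_type alias is abbreviated):

        // Mirror principle with g = 0: p_h^+ = -p_h^- + 2g = -p_h^-.
        value_type get_value(const unsigned int q) const
        {
          return -pressure_m.get_value(q);
        }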
     
    We don't have to apply boundary conditions for the velocity, i.e. $\mathbf{u}_h^+=\mathbf{u}_h^-$.

      template <int dim, typename Number>
      class BCEvaluationU
      {
        // ...
      }
     

    This next function evaluates the fluxes at faces between cells with the same material. If boundary faces are under consideration, fluxes into neighboring faces do not have to be considered, which is enforced via weight_neighbor=false. For non-matching faces, the fluxes into neighboring faces are likewise not considered. This is because we iterate over each side of the non-matching face separately (similar to a cell-centric loop).

    In this and following functions, we also introduce the factors $\tau$ and $\gamma$ that are computed from $\rho$ and $c$ along interfaces and that appear in the bilinear forms. Their definitions are provided in the introduction.

      template <bool weight_neighbor,
      typename InternalFaceIntegratorPressure,
      typename InternalFaceIntegratorVelocity,
        /* ... */
     

    Construction of non-matching triangulations

    Let us now make our way to the higher-level functions of this program.

    The first of these functions creates a two dimensional square triangulation that spans from $(0,0)$ to $(1,1)$. It consists of two sub-domains. The left sub-domain spans from $(0,0)$ to $(0.525,1)$. The right sub-domain spans from $(0.525,0)$ to $(1,1)$. The left sub-domain has elements that are three times smaller compared to the ones for the right sub-domain.

    At non-matching interfaces, we need to provide different boundary IDs for the cells that make up the two parts (because, while they may be physically adjacent, they are not logically neighbors given that the faces of cells on both sides do not match, and the Triangulation class will therefore treat the interface between the two parts as a "boundary"). These boundary IDs have to differ because later on RemotePointEvaluation has to search, for each face, for remote points that are defined in the same mesh (since we merge the mesh) but not on the same side of the non-matching interface. As a consequence, we declare at the top symbolic names for these boundary indicators, and ensure that we return a set with these values to the caller for later use.

    The actual mesh is then constructed by first constructing the left and right parts separately (setting material ids to zero and one, respectively), and using the appropriate boundary ids for all parts of the mesh. We then use GridGenerator::merge_triangulations() to combine them into one (non-matching) mesh. We have to pay attention that, should the two sub-triangulations have vertices at the same locations, these vertices are not merged (which would connect the two triangulations logically), since we want the interface to be an actual boundary. We achieve this by providing a tolerance of zero for the merge; see the documentation of the function GridGenerator::merge_triangulations(). A condensed sketch of this construction follows.
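
    In the sketch below, the subdivision counts and the concrete boundary-id values are illustrative choices, not necessarily those of the program:

      Triangulation<2> tria_left, tria_right, tria;

      // Left part, roughly three times finer than the right part.
      GridGenerator::subdivided_hyper_rectangle(tria_left, {21u, 40u},
                                                Point<2>(0.0, 0.0),
                                                Point<2>(0.525, 1.0));
      GridGenerator::subdivided_hyper_rectangle(tria_right, {7u, 14u},
                                                Point<2>(0.525, 0.0),
                                                Point<2>(1.0, 1.0));

      for (const auto &cell : tria_right.active_cell_iterators())
        cell->set_material_id(1); // the left part keeps the default id 0

      // Different boundary ids on the two sides of the interface x = 0.525.
      const types::boundary_id left_id = 98, right_id = 99; // illustrative
      for (const auto &face : tria_left.active_face_iterators())
        if (face->at_boundary() && std::abs(face->center()[0] - 0.525) < 1e-12)
          face->set_boundary_id(left_id);
      for (const auto &face : tria_right.active_face_iterators())
        if (face->at_boundary() && std::abs(face->center()[0] - 0.525) < 1e-12)
          face->set_boundary_id(right_id);

      // Tolerance zero: coinciding vertices are *not* merged, so the interface
      // stays a boundary instead of becoming a layer of interior faces.
      GridGenerator::merge_triangulations(tria_left, tria_right, tria,
                                          /*duplicated_vertex_tolerance=*/0.0,
                                          /*copy_manifold_ids=*/false,
                                          /*copy_boundary_ids=*/true);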

      template <int dim>
      // ...
     

    Next, we set up the inverse mass operator and the acoustic operator. Using remote_value_type=VectorizedArray<Number> makes the operator use point-to-point interpolation. These two objects are then used to create a RungeKutta2 object to perform the time integration.

    We also compute the maximum speed of sound, needed for the computation of the time-step size, and then run the time integrator. For the examples considered here, we found a limiting Courant number of $\mathrm{Cr}\approx 0.36$ to maintain stability. To ensure that the error of the temporal discretization is small, we use a considerably smaller Courant number of $0.2$.

      const auto inverse_mass_operator =
      std::make_shared<InverseMassOperator<dim, Number>>(matrix_free);
     
      // ...
      }

    Results

    Vibrating membrane: Point-to-point interpolation vs. Nitsche-type mortaring

    We compare the results of the simulations after the last time step, i.e. at $t=8T$. The $y$-component of the velocity field using Nitsche-type mortaring is depicted on the left. The same field using point-to-point interpolation is depicted on the right.

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_9.html differs (HTML document, UTF-8 Unicode text, with very long lines)
    --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_9.html 2024-11-15 06:44:33.803710645 +0000
    +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_9.html 2024-11-15 06:44:33.803710645 +0000

    Introduction

    In this example, our aims are the following:

    1. solve the advection equation $\beta \cdot \nabla u = f$;
    2. show how we can use multiple threads to get results quicker if we have a multi-processor machine;
    3. …

      While the second aim is difficult to describe in general terms without reference to the code, we will discuss the other two aims in the following. The use of multiple threads will then be detailed at the relevant places within the program. We will, however, follow the general discussion of the WorkStream approach detailed in the Parallel computing with multiple processors accessing shared memory documentation topic.

      Discretizing the advection equation

      In the present example program, we want to numerically approximate the solution of the advection equation

\[
   \beta \cdot \nabla u = f,
\]

      where $\beta$ is a vector field that describes the advection direction and speed (which may be dependent on the space variables if $\beta=\beta(\mathbf x)$), $f$ is a source function, and $u$ is the solution. The physical process that this equation describes is that of a given flow field $\beta$, with which another substance is transported, the density or concentration of which is given by $u$. The equation does not contain diffusion of this second species within its carrier substance, but there are source terms.

      It is obvious that at the inflow, the above equation needs to be augmented by boundary conditions:

\[
   u = g \qquad\qquad \mathrm{on}\ \partial\Omega_-,
\]

      where $\partial\Omega_-$ describes the inflow portion of the boundary and is formally defined by

\[
   \partial\Omega_-
   =
   \{{\mathbf x}\in \partial\Omega: \beta\cdot{\mathbf n}({\mathbf x}) < 0\},
\]

      and ${\mathbf n}({\mathbf x})$ being the outward normal to the domain at point ${\mathbf x}\in\partial\Omega$. This definition is quite intuitive, since as ${\mathbf n}$ points outward, the scalar product with $\beta$ can only be negative if the transport direction $\beta$ points inward, i.e. at the inflow boundary. The mathematical theory states that we must not pose any boundary condition on the outflow part of the boundary.

      Unfortunately, the equation stated above cannot be solved in a stable way using the standard finite element method. The problem is that solutions to this equation possess insufficient regularity perpendicular to the transport direction: while they are smooth along the streamlines defined by the "wind field" $\beta$, they may be discontinuous perpendicular to this direction. This is easy to understand: what the equation $\beta \cdot \nabla u = f$ means is in essence that the rate of change of $u$ in direction $\beta$ equals $f$. But the equation has no implications for the derivatives in the perpendicular direction, and consequently if $u$ is discontinuous at a point on the inflow boundary, then this discontinuity will simply be transported along the streamline of the wind field that starts at this boundary point. These discontinuities lead to numerical instabilities that make a stable solution by a standard continuous finite element discretization impossible.

      A standard approach to address this difficulty is the "streamline-upwind Petrov-Galerkin" (SUPG) method, sometimes also called the streamline diffusion method. A good explanation of the method can be found in [elman2005]. Formally, this method replaces the step in which we derive the weak form of the differential equation from the strong form: Instead of multiplying the equation by a test function $v$ and integrating over the domain, we instead multiply by $v + \delta \beta\cdot\nabla v$, where $\delta$ is a parameter that is chosen in the range of the (local) mesh width $h$; good results are usually obtained by setting $\delta=0.1h$. (Why this is called "streamline diffusion" will be explained below; for the moment, let us simply take for granted that this is how we derive a stable discrete formulation.) The value for $\delta$ here is small enough that we do not introduce excessive diffusion, but large enough that the resulting problem is well-posed.

      Using the test functions as defined above, an initial weak form of the problem would ask for finding a function $u_h$ so that for all test functions $v_h$ we have

\[
   (\beta \cdot \nabla u_h, v_h + \delta \beta\cdot\nabla v_h)_\Omega
   =
   (f, v_h + \delta \beta\cdot\nabla v_h)_\Omega.
\]

      However, we would like to include inflow boundary conditions $u=g$ weakly into this problem, and this can be done by requiring that in addition to the equation above we also have

\[
   (u_h, w_h)_{\partial\Omega_-}
   =
   (g, w_h)_{\partial\Omega_-}
\]

      for all test functions $w_h$ that live on the boundary and that are from a suitable test space. It turns out that a suitable space of test functions happens to be $\beta\cdot {\mathbf n}$ times the traces of the functions $v_h$ in the test space we already use for the differential equation in the domain. Thus, we require that for all test functions $v_h$ we have

\[
   (u_h, \beta\cdot {\mathbf n} v_h)_{\partial\Omega_-}
   =
   (g, \beta\cdot {\mathbf n} v_h)_{\partial\Omega_-}.
\]

      Without attempting a justification (see again the literature on the finite element method in general, and the streamline diffusion method in particular), we can combine the equations for the differential equation and the boundary values in the following weak formulation of our stabilized problem: find a discrete function $u_h$ such that for all discrete test functions $v_h$ there holds

\[
   (\beta \cdot \nabla u_h, v_h + \delta \beta\cdot\nabla v_h)_\Omega
   -
   (u_h, \beta\cdot {\mathbf n} v_h)_{\partial\Omega_-}
   =
   (f, v_h + \delta \beta\cdot\nabla v_h)_\Omega
   -
   (g, \beta\cdot {\mathbf n} v_h)_{\partial\Omega_-}.
\]

      One would think that this leads to a system matrix to be inverted of the form

\[
   a_{ij} =
   (\beta \cdot \nabla \varphi_i,
    \varphi_j + \delta \beta\cdot\nabla \varphi_j)_\Omega
   -
   (\varphi_i, \beta\cdot {\mathbf n} \varphi_j)_{\partial\Omega_-},
\]

      with basis functions $\varphi_i,\varphi_j$. However, this is a pitfall that happens to every numerical analyst at least once (including the author): we have here expanded the solution $u_h = \sum_i U_i \varphi_i$, but if we do so, we will have to solve the problem

\[
   U^T A = F^T,
\]

      where $U$ is the vector of expansion coefficients, i.e., we have to solve the transpose problem of what we might have expected naively.

      This is a point we made in the introduction of step-3. There, we argued that to avoid this very kind of problem, one should get in the habit of always multiplying with test functions from the left instead of from the right to obtain the correct matrix right away. In order to obtain the form of the linear system that we need, it is therefore best to rewrite the weak formulation to

\[
   (v_h + \delta \beta\cdot\nabla v_h, \beta \cdot \nabla u_h)_\Omega
   -
   (\beta\cdot {\mathbf n} v_h, u_h)_{\partial\Omega_-}
   =
   (v_h + \delta \beta\cdot\nabla v_h, f)_\Omega
   -
   (\beta\cdot {\mathbf n} v_h, g)_{\partial\Omega_-}
\]

      and then to obtain

\[
   a_{ij} =
   (\varphi_i + \delta \beta \cdot \nabla \varphi_i,
    \beta\cdot\nabla \varphi_j)_\Omega
   -
   (\beta\cdot {\mathbf n} \varphi_i, \varphi_j)_{\partial\Omega_-},
\]

      as system matrix. We will assemble this matrix in the program.
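
      The corresponding cell contribution is a plain quadrature loop. As a sketch (fe_values, dofs_per_cell, cell_matrix, the stabilization parameter delta, and the advection values beta_values[q] are assumed to be set up by the surrounding assembly code; the boundary term is omitted):

        // a_ij += (phi_i + delta * beta.grad(phi_i), beta.grad(phi_j))
        for (unsigned int q = 0; q < fe_values.n_quadrature_points; ++q)
          for (unsigned int i = 0; i < dofs_per_cell; ++i)
            for (unsigned int j = 0; j < dofs_per_cell; ++j)
              cell_matrix(i, j) +=
                (fe_values.shape_value(i, q) +                      // phi_i
                 delta * (beta_values[q] * fe_values.shape_grad(i, q))) *
                (beta_values[q] * fe_values.shape_grad(j, q)) *     // beta.grad phi_j
                fe_values.JxW(q);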

      Why is this method called "streamline diffusion"?

      Looking at the bilinear form mentioned above, we see that the discrete solution has to satisfy an equation of which the left hand side in weak form has a domain term of the kind

\[
   (v_h + \delta \beta\cdot\nabla v_h, \beta \cdot \nabla u_h)_\Omega,
\]

      or if we split this up, the form

\[
   (v_h, \beta \cdot \nabla u_h)_\Omega
   +
   (\delta \beta\cdot\nabla v_h, \beta \cdot \nabla u_h)_\Omega.
\]

      If we wanted to see what strong form of the equation that would correspond to, we need to integrate the second term. This yields the following formulation, where for simplicity we'll ignore boundary terms for now:

\[
   (v_h, \beta \cdot \nabla u_h)_\Omega
   -
   \left(v_h, \delta \nabla \cdot \left[\beta \left(\beta \cdot \nabla
   u_h\right)\right]\right)_\Omega
   +
   \text{boundary terms}.
\]

      Let us assume for a moment that the wind field $\beta$ is divergence-free, i.e., that $\nabla \cdot \beta = 0$. Then applying the product rule to the derivative of the term in square brackets on the right and using the divergence-freeness will give us the following:

\[
   (v_h, \beta \cdot \nabla u_h)_\Omega
   -
   \left(v_h, \delta \left[\beta \cdot \nabla\right] \left[\beta \cdot \nabla
   \right]u_h\right)_\Omega
   +
   \text{boundary terms}.
\]

      That means that the strong form of the equation would be of the sort

\[
   \beta \cdot \nabla u_h
   -
   \delta
   \left[\beta \cdot \nabla\right] \left[\beta \cdot \nabla
   \right] u_h.
\]

      What is important to recognize now is that $\beta\cdot\nabla$ is the derivative in direction $\beta$. So, if we denote this by $\beta\cdot\nabla=\frac{\partial}{\partial \beta}$ (in the same way as we often write $\mathbf n\cdot\nabla=\frac{\partial}{\partial n}$ for the derivative in normal direction at the boundary), then the strong form of the equation is

\[
   \beta \cdot \nabla u_h
   -
   \delta
   \frac{\partial^2}{\partial\beta^2} u_h.
\]

    /usr/share/doc/packages/dealii/doxygen/deal.II/step_90.html differs (HTML document, UTF-8 Unicode text, with very long lines)
    --- old//usr/share/doc/packages/dealii/doxygen/deal.II/step_90.html 2024-11-15 06:44:33.879711324 +0000
    +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/step_90.html 2024-11-15 06:44:33.883711359 +0000

    This program was contributed by Vladimir Yushutin and Timo Heister, Clemson University, 2023.

      This material is based upon work partly supported by the National Science Foundation Award DMS-2028346, OAC-2015848, EAR-1925575, and by the Computational Infrastructure in Geodynamics initiative (CIG), through the NSF under Award EAR-0949446, EAR-1550901, EAR-2149126 via the University of California – Davis.

      Introduction

      In this tutorial, we implement the trace finite element method (TraceFEM) in deal.II. TraceFEM solves PDEs posed on a possibly evolving $(dim-1)$-dimensional surface $\Gamma$ employing a fixed uniform background mesh of a $dim$-dimensional domain in which the surface is embedded. Such surface PDEs arise in problems involving material films with complex properties and in other situations in which a non-trivial condition is imposed on either a stationary or a moving interface. Here we consider a steady, complex, non-trivial surface and the prototypical Laplace-Beltrami equation which is a counterpart of the Poisson problem on flat domains.

      Being an unfitted method, TraceFEM makes it possible to circumvent the need to remesh an evolving surface if it is implicitly given by the zero contour of a level-set function. At the same time, it easily provides an extension of the discrete solution to a neighborhood of the surface, which turns out to be very handy in the case of non-stationary interfaces and films. Certainly, this flexibility comes with a price: one needs to design the nodes and weights for a quadrature customized for each implicit intersection of the zero level-set and the background mesh. Moreover, these intersections may be of arbitrary shape and size, manifesting in the so-called "small cut" problem and requiring the addition of a stabilization form that restores well-conditioning of the problem.

      Two aspects are the focus of our attention. First, the surface approximation is separated from the discretization of the surface PDE, e.g., a $Q_2$ discrete level-set and a $Q_1$ solution are possible on the same bulk triangulation. Second, we make sure that the performance of TraceFEM in the parallel implementation corresponds to that of a classical fitted FEM for a two-dimensional problem. We demonstrate how to achieve both goals by using a combination of MeshWorker and NonMatching capabilities.

      A natural alternative to TraceFEM in solving surface PDEs is the parametric surface finite element method. The latter method relies on an explicit parametrization of the surface, which may not be feasible, especially for evolving interfaces whose shape is not known in advance; in this sense, TraceFEM is a technique inspired by the level-set description of interfaces. However, the parametric surface finite element method, when applicable, enjoys many well-known properties of fitted methods on flat domains, provided the geometric errors (which are present for both methods) are kept under control.

      A non-trivial surface

      A fitted FEM on a flat two-dimensional domain, if discretized by piecewise linears with $N$ degrees of freedom, typically results in $O(h)=O(N^{-1/2})$ convergence rate of the energy error; requires $O(N)$ storage for the degrees of freedom; and, more importantly, takes $O(N)$ of construction time to create them, i.e. to mesh the domain. TraceFEM, although solving a two-dimensional problem, relies on the inherently three-dimensional mesh on which the level-set function must be defined and, if implemented naively, suffers from the increased storage and the increased construction time in terms of the active degrees of freedom $N_a$ that actually enter the scheme with, hopefully, $O(N_a^{-1/2})$ error. To combat these possible bottlenecks, we create iteratively a mesh which is localized near the zero contour line of the level set function, i.e. near the surface, to restore the aforementioned two-dimensional performance typical for fitted FEM; see the first three typical iterations of this methodology below.

      Iterative localization of the zero contour of a typical level set
      The cells colored in red carry the active degrees of freedom (total number $N_a$), as the level set is not sign-definite at their support points. Notice also that the mesh is graded: any cell has at most 4 neighbors adjacent to each of its 6 faces.

      Once a desired geometry approximation $\Gamma_h$ is achieved using the iterative approach above, we can start forming the linear system using the constructed normals and quadratures. For the purposes of the tutorial we choose a non-trivial surface $\Gamma$ given by

\begin{equation*}
   \frac{x^2}{4}+ y^2 + \frac{4  z^2} {(1 + 0.5  \sin(\pi  x))^{2}} = 1
\end{equation*}

      The OY and OX views of this tamarind-shaped, exact surface $\Gamma$ are shown below along with the mesh after three iterations (the approximation $\Gamma_h$ is not shown).

      OY(left) and OZ(right) cross-sections of the background mesh along with the exact surface

      Model problem

      We would like to solve the simplest possible problem defined on a surface, namely the Laplace–Beltrami equation,

\begin{equation*}
  -\Delta_\Gamma u + c u = f \qquad  \text{in }\, \Gamma,
\end{equation*}

      where we take $c=1$ for concreteness. We added the term $cu$ to the left-hand side so the problem becomes well-posed in the absence of any boundary; an alternative could be to take $c=0$ but impose the zero mean condition.

      Manufactured exact solution

      We choose the test solution and the right-hand side forcing as the restriction to $\Gamma$ of

\begin{equation*}
  u(x,y,z)=xy\,,\quad
  f(x,y,z)=xy + 2.0\,\mathbf{n}_x \mathbf{n}_y + \kappa  (y \mathbf{n}_x + x\mathbf{n}_y),
\end{equation*}

      where the latter is manufactured using the exact normal $\mathbf{n}$, the exact Hessian $\nabla^2\mathbf{n}$, and the mean curvature $\kappa=\mathrm{div}\,\mathbf{n}$ of the surface. Note that we do not need to impose any boundary conditions as the surface $\Gamma$ is closed.

      The Trace Finite Element Method

      TraceFEM is an unfitted method: the surface $\Gamma$ is immersed into a regular, uniform background mesh that stays fixed even if the surface would be evolving. To solve the Laplace–Beltrami equation, we first construct a surface approximation $\Gamma_h$ by implicitly intersecting the cells of the background mesh with the iso surface of an approximation of the level-set field. We note that we never actually create any two-dimensional meshes for the surface but only compute approximate quadrature points and surface normals. Next we distribute degrees of freedom over a thin subdomain $\Omega_h$ that completely covers $\Gamma_h$ and that consists of the intersected cells $\mathcal{T}_\Gamma^h$,

\begin{equation*}
  \mathcal{T}_\Gamma^h = \{ T \in \mathcal{T}^{h} : T \cap \Gamma_h \neq \emptyset \}.
\end{equation*}

      The finite element space where we want to find our numerical solution, $u_h$, is now

\begin{equation*}
  V_h = \{ v \in C(\Omega_h) : v \in Q_p(T), \, T \in \mathcal{T}_\Gamma^h \},
\end{equation*}

      where $\Omega_h = \bigcup_{T \in \mathcal{T}_\Gamma^h} \overline{T}$ is the union of all intersected cells.

      To create $V_h$, we first add an FE_Q and an FE_Nothing element to an hp::FECollection. We then iterate over each cell $T$ and, depending on whether $T$ belongs to $\mathcal{T}_\Gamma^h$ or not, we set the active_fe_index to either 0 or 1. To determine whether a cell is intersected or not, we use the class NonMatching::MeshClassifier.
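
      In code, this classification-and-assignment step looks roughly like the following sketch (fe_degree, the DoF handlers, and the level_set vector are assumed to exist in the surrounding program):

        hp::FECollection<dim> fe_collection;
        fe_collection.push_back(FE_Q<dim>(fe_degree)); // index 0: intersected cells
        fe_collection.push_back(FE_Nothing<dim>());    // index 1: everything else

        NonMatching::MeshClassifier<dim> mesh_classifier(level_set_dof_handler,
                                                         level_set);
        mesh_classifier.reclassify();

        for (const auto &cell : dof_handler.active_cell_iterators())
          if (cell->is_locally_owned())
            cell->set_active_fe_index(
              mesh_classifier.location_to_level_set(cell) ==
                  NonMatching::LocationToLevelSet::intersected ?
                0 :
                1);

        dof_handler.distribute_dofs(fe_collection);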

      A natural candidate for a weak formulation involves the following (bi)linear forms

\begin{align*}
  a_h(u_h, v_h) =  (\nabla_{\Gamma_h} u_h, \nabla_{\Gamma_h} v_h)_{\Gamma_h}+(u_h, v_h)_{\Gamma_h}\,,\qquad
  L_h(v_h)      =  (f^e,v_h)_{\Gamma_h}.
\end{align*}

      where $f^e$ is an extension (not necessarily the so-called normal extension) of $f$ from $\Gamma$ to $\Omega_h$. Note that the right-hand side $f$ of the Laplace-Beltrami problem is defined on the exact surface $\Gamma$ only, and we need to specify how to evaluate its action on the perturbed approximate geometry $\Gamma_h$ which is immersed in $\Omega_h$. For the purposes of this test, the forcing $f$ is manufactured using $u=xy$ and the level-set function and, therefore, is a function of the Cartesian coordinates $x$, $y$, $z$. The latter is identified with $f^e$ on $\Gamma_h$ and it is not the normal extension of the function $f$.

      However, the so-called "small-cut problem" may arise and one should introduce the stabilized version of TraceFEM: Find $u_h \in V_h$ such that

\begin{equation*}
  a_h(u_h,v_h) + s_h(u_h, v_h) = L_h(v_h), \quad \forall v_h \in V_\Omega^h.
\end{equation*}

      Here the normal-gradient stabilization $s_h$ involves the three-dimensional integration over whole (but intersected) cells and is given by

\begin{equation*}
  s_h(u_h,v_h) = h^{-1}(\mathbf{n}_h\cdot\nabla u_h, \mathbf{n}_h\cdot\nabla v_h)_{\Omega_h},
\end{equation*}

      Note that the $h^{-1}$ scaling may be relaxed for sufficiently smooth solutions such as the manufactured one, but we choose the strong scaling to demonstrate the extreme case [traceFEM_review_2017].

      Discrete Level Set Function

      In TraceFEM we construct the approximation $\Gamma_h$ using the interpolant $\psi_h$ of the exact level-set function on the bulk triangulation:

\begin{align*}
  \Gamma_h &= \{x \in \mathbb{R}^{\text{3}} : \psi_h(x) = 0 \}.
\end{align*}

      The exact normal vector $\mathbf{n}$ is approximated by $\mathbf{n}_h=\nabla\psi_h/\|\nabla\psi_h\|$ which, together with the approximate quadrature for the integration over $\Gamma_h$, leads to the so-called "geometrical error". Luckily, one can show [traceFEM_review_2017] that the method converges optimally for the model problem if the same element space $V_h$ is employed for the discrete functions and for the interpolation of the level set function, as if the exact domain would have been used. Furthermore, deal.II allows one to choose independently the discrete space for the solution and a higher-order discrete space for the level set function for a more accurate geometric approximation.

      The commented program

        #include <deal.II/base/function.h>
        #include <deal.II/base/numbers.h>

      Exact solution

      The following class defines the chosen exact solution and its surface gradient. The exact solution we try to reproduce is $u=xy$ and it may be evaluated away from $\Gamma$ as any other function of Cartesian points. Also note that the gradient() method returns the surface gradient $\nabla_\Gamma u$ of the exact solution.

        template <int dim>
        class AnalyticalSolution : public Function<dim>
        {
          // ...
        };
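
      The elided members evaluate $u=xy$ and its surface gradient $\nabla_\Gamma u = (I-\mathbf{n}\otimes\mathbf{n})\nabla u$; a sketch (exact_normal() is an assumed helper returning the exact unit normal of $\Gamma$ at a point):

        template <int dim>
        double AnalyticalSolution<dim>::value(const Point<dim> &p,
                                              const unsigned int) const
        {
          return p[0] * p[1]; // u = xy
        }

        template <int dim>
        Tensor<1, dim> AnalyticalSolution<dim>::gradient(const Point<dim> &p,
                                                         const unsigned int) const
        {
          Tensor<1, dim> grad; // Cartesian gradient of u = xy
          grad[0] = p[1];
          grad[1] = p[0];
          const Tensor<1, dim> n = exact_normal(p); // assumed helper
          return grad - (n * grad) * n; // projection onto the tangent plane
        }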

      Exact forcing

      We choose the right hand side equal to the evaluation of the surface Laplacian for a manufactured solution $u$. This corresponds to the exact forcing $f=-\Delta_\Gamma u+u$:

        template <int dim>
        class RightHandSide : public Function<dim>
        {
          // ...
        };

      Normal-gradient stabilization form of TraceFEM

      The following class corresponds to the stabilization form, its contribution to the global matrix and to the error. More specifically, the method needs_cell_worker() indicates whether the bilinear form of the stabilization, unlike the main bilinear form of the Laplace-Beltrami operator, needs the bulk cell quadratures. The cell worker, which is useful in an accumulation by MeshWorkers, is provided by the assemble_cell_worker() method. The remaining method evaluate_cell_worker() computes the stabilization error for the solution $u_h$, i.e. $s_h(u_h,u_h)$. Also note that the method needs_cell_worker() indicates that the assembly and the evaluation of the form do require a bulk cell quadrature; this methodology may be utilized in the MeshWorker. The stabilization scaling is specified by $\mathrm{stabilization\_parameter}\cdot h^{\mathrm{stabilization\_exponent}}$. For elliptic problems with smooth solutions we can choose any $-1\leq \mathrm{stabilization\_exponent} \leq 1$ and a sufficiently large $\mathrm{stabilization\_parameter}$ that depends on $\Gamma$.

        template <int dim>
        class NormalGradientVolumeStabilization
        {
          // ...
        };
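
      The quadrature loop inside assemble_cell_worker() then accumulates exactly the form $s_h$ from above; a sketch (fe_values, dofs_per_cell, cell_matrix, the per-cell extent cell_extent, and the discrete normals normals[q] are assumed to be set up by the surrounding worker):

        // s_h(u,v) = h^{-1} (n_h . grad u, n_h . grad v) over the *full* cell.
        for (unsigned int q = 0; q < fe_values.n_quadrature_points; ++q)
          for (unsigned int i = 0; i < dofs_per_cell; ++i)
            for (unsigned int j = 0; j < dofs_per_cell; ++j)
              cell_matrix(i, j) +=
                (1.0 / cell_extent) *                       // h^{-1} scaling
                (normals[q] * fe_values.shape_grad(i, q)) * // n_h . grad v
                (normals[q] * fe_values.shape_grad(j, q)) * // n_h . grad u
                fe_values.JxW(q);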

      Results

      The numerical solution $u_h$ for a very fine mesh $\Gamma_h$ is shown below by plotting in Paraview the zero contour of the approximate level set $\psi_h$ and restricting the discrete solution $u_h$ to the resulting surface approximation $\Gamma_h$.


      The results of the convergence study are shown in the following table. The experimental orders of convergence (EOC) are reported for the surface errors and the stabilization.

      Cycle DOFS Rate Iterations $L^2$-Error EOC $H^1$-Error EOC $s_h^{1/2}(u_h)$ EOC
      0 12370 - 15 7.6322e-02 - 3.6212e-01 - 2.2423e-01 -
      …
      6 50122218 2.00 30 4.3891e-06 1.98 4.9219e-03 1.00 3.7042e-03 1.00
      In this test we refine the mesh near the surface and, as a result, the number of degrees of freedom scales in the two-dimensional fashion. The optimal rates of error convergence in $L^2(\Gamma)$ and $H^1(\Gamma)$ norms are clearly observable. We also note the first order convergence of the stabilization $s_h^{1/2}(u_h)=\sqrt{s_h(u_h, u_h)}$ evaluated at the solution $u_h$.

      Parallel scalability

      The weak and strong scalability test results are shown in the following figure. Clearly, the refine() method is responsible for a certain lack of parallel scalability.

    /usr/share/doc/packages/dealii/doxygen/deal.II/structCellData.html differs (HTML document, ASCII text, with very long lines)
    --- old//usr/share/doc/packages/dealii/doxygen/deal.II/structCellData.html 2024-11-15 06:44:33.903711538 +0000
    +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structCellData.html 2024-11-15 06:44:33.903711538 +0000

    Detailed Description

    template<int structdim>
    struct CellData< structdim >

    The CellData class (and the related SubCellData class) is used to provide a comprehensive, but minimal, description of the cells when creating a triangulation via Triangulation::create_triangulation(). Specifically, each CellData object – describing one cell in a triangulation – has member variables for indices of the $2^d$ vertices (the actual coordinates of the vertices are described in a separate vector passed to Triangulation::create_triangulation(), so the CellData object only needs to store indices into that vector), the material id of the cell that can be used in applications to describe which part of the domain a cell belongs to (see the glossary entry on material ids), and a manifold id that is used to describe the geometry object that is responsible for this cell (see the glossary entry on manifold ids) to describe the manifold this object belongs to.

    This structure is also used to represent data for faces and edges when used as a member of the SubCellData class. In this case, the template argument structdim of an object will be less than the dimension dim of the triangulation. If this is so, then the vertices array represents the indices of the vertices of one face or edge of one of the cells passed to Triangulation::create_triangulation(). Furthermore, for faces the material id has no meaning, and the material_id field is reused to store a boundary_id instead to designate which part of the boundary the face or edge belongs to (see the glossary entry on boundary ids).

    An example showing how this class can be used is in the create_coarse_grid() function of step-14. There are also many more use cases in the implementation of the functions of the GridGenerator namespace.

    /usr/share/doc/packages/dealii/doxygen/deal.II/structColorEnriched_1_1Helper.html differs (HTML document, ASCII text, with very long lines)
    --- old//usr/share/doc/packages/dealii/doxygen/deal.II/structColorEnriched_1_1Helper.html 2024-11-15 06:44:33.927711752 +0000
    +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structColorEnriched_1_1Helper.html 2024-11-15 06:44:33.927711752 +0000

    Detailed Description

    template<int dim, int spacedim = dim>
    struct ColorEnriched::Helper< dim, spacedim >

    ColorEnriched::Helper class creates a collection of FE_Enriched finite elements (hp::FECollection) to be used with DoFHandler in a domain with multiple, possibly overlapping, sub-domains with individual enrichment functions. Note that the overlapping regions may have multiple enrichment functions associated with them. This is implemented using a general constructor of FE_Enriched object which allows different enrichment functions.

    Consider a domain with multiple enriched sub-domains which are disjoint, i.e., not connected with each other. To ensure $C^0$ continuity at the interface between the enriched sub-domain (characterized by a single enrichment function) and the non-enriched domain, we can use an FE_Enriched object in the enriched sub-domain and, in the non-enriched domain, a standard finite element (e.g., FE_Q) wrapped into an FE_Enriched object (which internally uses a dominating FE_Nothing object). Refer to the documentation on FE_Enriched for more information on this. Note that an FE_Enriched object is constructed from a base FE (a FiniteElement object) and one or more enriched FEs; FE_Nothing is a dummy enriched FE.
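    A minimal sketch of this single-enrichment case, assuming the simple FE_Enriched constructors documented for that class (the enrichment function must outlive the elements):

    #include <deal.II/base/function.h>
    #include <deal.II/fe/fe_enriched.h>
    #include <deal.II/fe/fe_q.h>
    #include <deal.II/hp/fe_collection.h>

    template <int dim>
    dealii::hp::FECollection<dim>
    make_collection(const dealii::Function<dim> &enrichment)
    {
      const dealii::FE_Q<dim> fe_base(1);

      dealii::hp::FECollection<dim> collection;
      // Enriched sub-domain: base FE plus an FE_Q multiplied by the
      // enrichment function.
      collection.push_back(
        dealii::FE_Enriched<dim>(fe_base, dealii::FE_Q<dim>(1), &enrichment));
      // Non-enriched domain: plain FE_Q wrapped into FE_Enriched
      // (internally paired with a dominating FE_Nothing).
      collection.push_back(dealii::FE_Enriched<dim>(fe_base));
      return collection;
    }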

    The situation becomes more complicated when two enriched sub-domains share an interface. When the number of enrichment functions is the same for both sub-domains, the FE_Enriched object of one sub-domain is constructed such that each enriched FE is paired (figuratively) with an FE_Nothing in the FE_Enriched object of the other sub-domain. For example, let the FEs fe_enr1 and fe_enr2, which will be used with enrichment functions, correspond to the two sub-domains. Then the FE_Enriched objects of the two sub-domains are built using [fe_base, fe_enr1, fe_nothing] and [fe_base, fe_nothing, fe_enr2], respectively. Note that the size of the vector of enriched FEs (used in the FE_Enriched constructor) is equal to 2, the same as the number of enrichment functions. When the number of enrichment functions is not the same, additional enriched FEs are paired with FE_Nothing. This ensures that the enriched DoFs at the interface are set to zero by the DoFTools::make_hanging_node_constraints() function. Using these two strategies, we construct the appropriate FE_Enriched objects using the general constructor. Note that this is done on a mesh without hanging nodes.

    Now consider a domain with multiple sub-domains which may share an interface with each other. As discussed previously, the number of enriched FEs in the FE_Enriched object of each sub-domain needs to be equal to the number of sub-domains. This is because we are not using the information of how the domains are connected, and any sub-domain may share an interface with any other sub-domain (not considering overlaps for now!). However, in general, a given sub-domain shares an interface only with a few sub-domains. This warrants the use of a graph coloring algorithm to reduce the size of the vector of enriched FEs (used in the FE_Enriched constructor). By giving sub-domains that share no interface the same color, a single 'std::function' that returns different enrichment functions for each sub-domain can be constructed. Then the size of the vector of enriched FEs is equal to the number of different colors used for the predicates (or sub-domains).

    Note
    The graph coloring function, SparsityTools::color_sparsity_pattern, used for assigning colors to the sub-domains needs MPI (use Utilities::MPI::MPI_InitFinalize to initialize MPI and the necessary Zoltan setup). The coloring function, based on Zoltan, is a parallel coloring algorithm but is used in serial by SparsityTools::color_sparsity_pattern.
    /usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1CommonInputs.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1CommonInputs.html 2024-11-15 06:44:33.951711966 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1CommonInputs.html 2024-11-15 06:44:33.951711966 +0000 @@ -162,7 +162,7 @@

    In other words, just because we know the value of the spacedim template argument of the current class does not mean that the data type of the cell iterator that is currently being worked on is obvious.

    To make the cell iterator accessible nevertheless, this class uses an object of type std::any to store the cell iterator. You can think of this as being a void pointer that can point to anything. Using what is stored therefore requires the user to know the data type of the thing being pointed to.

    To make this work, the DataOut and related classes store in objects of the current type a representation of the cell. To get it back out, you would use the get_cell() function that requires you to say, as a template parameter, the dimension of the cell that is currently being processed. This is knowledge you typically have in an application: for example, if your application runs in dim space dimensions and you are currently using the DataOut class, then the cells that are worked on have data type DataOut<dim>::cell_iterator. Consequently, in a postprocessor, you can call inputs.get_cell<dim> . For technical reasons, however, C++ will typically require you to write this as inputs.template get_cell<dim> because the member function we call here requires that we explicitly provide the template argument.

    Let us consider a complete example of a postprocessor that computes the fluid norm of the stress $\|\sigma\| = \|\eta \nabla u\|$ from the viscosity $\eta$ and the gradient of the fluid velocity, $\nabla u$, assuming that the viscosity is something that depends on the cell's material id. This can be done using a class we derive from DataPostprocessorScalar where we overload the DataPostprocessor::evaluate_vector_field() function that receives the values and gradients of the velocity (plus those of other solution variables such as the pressure, but let's ignore those for the moment). Then we could use code such as this:

    template <int dim>
    class ComputeStress : public DataPostprocessorScalar<dim>
    {
    public:
      virtual void evaluate_vector_field(
        const DataPostprocessorInputs::Vector<dim> &input_data,
        std::vector<Vector<double>> &computed_quantities) const override;
    };
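    What the elided function body might look like is sketched below; viscosity_for_material() is a hypothetical lookup keyed on the material id, not a deal.II function:

    template <int dim>
    void ComputeStress<dim>::evaluate_vector_field(
      const DataPostprocessorInputs::Vector<dim> &input_data,
      std::vector<Vector<double>> &computed_quantities) const
    {
      // Retrieve the cell currently being worked on, as described above:
      const auto cell = input_data.template get_cell<dim>();

      // Hypothetical viscosity lookup (not part of deal.II):
      const double eta = viscosity_for_material(cell->material_id());

      for (unsigned int q = 0; q < computed_quantities.size(); ++q)
        {
          // ||sigma|| = ||eta grad u||, accumulated over the dim
          // velocity components (other solution variables ignored).
          double norm_square = 0;
          for (unsigned int d = 0; d < dim; ++d)
            norm_square +=
              (eta * input_data.solution_gradients[q][d]).norm_square();
          computed_quantities[q](0) = std::sqrt(norm_square);
        }
    }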
    /usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1Vector.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1Vector.html 2024-11-15 06:44:33.971712145 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structDataPostprocessorInputs_1_1Vector.html 2024-11-15 06:44:33.971712145 +0000 @@ -157,7 +157,7 @@

    Detailed Description

    template<int spacedim>
    struct DataPostprocessorInputs::Vector< spacedim >

    A structure that is used to pass information to DataPostprocessor::evaluate_vector_field(). It contains the values and (if requested) derivatives of a vector-valued solution variable at the evaluation points on a cell or face.

    This class is also used if the solution vector is complex-valued (whether it is scalar- or vector-valued is immaterial in that case) since in that case, the DataOut and related classes take apart the real and imaginary parts of a solution vector. In practice, that means that if a solution vector has $N$ vector components (i.e., there are $N$ functions that form the solution of the PDE you are dealing with; $N$ is not the size of the solution vector), then if the solution is real-valued the solution_values variable below will be an array with as many entries as there are evaluation points on a cell, and each entry is a vector of length $N$ representing the $N$ solution functions evaluated at a point. On the other hand, if the solution is complex-valued (i.e., the vector passed to DataOut::build_patches() has complex-valued entries), then the solution_values member variable of this class will have $2N$ entries for each evaluation point. The first $N$ of these entries represent the real parts of the solution, and the second $N$ entries correspond to the imaginary parts of the solution evaluated at the evaluation point. The same layout is used for the solution_gradients and solution_hessians fields: First the gradients/Hessians of the real components, then all the gradients/Hessians of the imaginary components. There is more information about the subject in the documentation of the DataPostprocessor class itself. step-58 provides an example of how this class is used in a complex-valued situation.

    Through the fields in the CommonInputs base class, this class also makes available access to the locations of evaluations points, normal vectors (if appropriate), and which cell data is currently being evaluated on (also if appropriate).
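    A small sketch of this $2N$ layout (the helper name is ours, not part of deal.II):

    #include <deal.II/lac/vector.h>

    #include <complex>
    #include <vector>

    // Reassemble component c of a complex-valued solution at evaluation
    // point q: entries [0, N) hold the real parts, [N, 2N) the imaginary
    // parts, as described above.
    std::complex<double>
    complex_component(const std::vector<dealii::Vector<double>> &solution_values,
                      const unsigned int q,
                      const unsigned int c,
                      const unsigned int n_components) // N
    {
      return {solution_values[q](c),
              solution_values[q](n_components + c)};
    }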

    Definition at line 399 of file data_postprocessor.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/structGeometryInfo.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/structGeometryInfo.html 2024-11-15 06:44:34.015712538 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structGeometryInfo.html 2024-11-15 06:44:34.015712538 +0000 @@ -1539,7 +1539,7 @@
    Compute the value of the $i$-th $d$-linear (i.e. (bi-,tri-)linear) shape function at location $\xi$.

    @@ -1567,7 +1567,7 @@
    Compute the gradient of the $i$-th $d$-linear (i.e. (bi-,tri-)linear) shape function at location $\xi$.

    @@ -1598,13 +1598,13 @@

    For a (bi-, tri-)linear mapping from the reference cell, face, or edge to the object specified by the given vertices, compute the alternating form of the transformed unit vectors vertices. For an object of dimensionality dim, there are dim vectors with spacedim components each, and the alternating form is a tensor of rank spacedim-dim that corresponds to the wedge product of the dim unit vectors, and it corresponds to the volume and normal vectors of the mapping from reference element to the element described by the vertices.

    For example, if dim==spacedim==2, then the alternating form is a scalar (because spacedim-dim=0) and its value equals $\mathbf v_1\wedge \mathbf v_2=\mathbf v_1^\perp \cdot\mathbf v_2$, where $\mathbf v_1^\perp$ is a vector that is rotated to the right by 90 degrees from $\mathbf v_1$. If dim==spacedim==3, then the result is again a scalar with value $\mathbf v_1\wedge \mathbf v_2 \wedge \mathbf v_3 = (\mathbf v_1\times \mathbf v_2)\cdot \mathbf v_3$, where $\mathbf v_1, \mathbf v_2, \mathbf v_3$ are the images of the unit vectors at a vertex of the unit dim-dimensional cell under transformation to the dim-dimensional cell in spacedim-dimensional space. In both cases, i.e. for dim==2 or 3, the result happens to equal the determinant of the Jacobian of the mapping from reference cell to cell in real space. Note that it is the actual determinant, not its absolute value as often used in transforming integrals from one coordinate system to another. In particular, if the object specified by the vertices is a parallelogram (i.e. a linear transformation of the reference cell) then the computed values are the same at all vertices and equal the (signed) area of the cell; similarly, for parallelepipeds, it is the volume of the cell.

    Likewise, if we have dim==spacedim-1 (e.g. we have a quad in 3d space, or a line in 2d), then the alternating product denotes the normal vector (i.e. a rank-1 tensor, since spacedim-dim=1) to the object at each vertex, where the normal vector's magnitude denotes the area element of the transformation from the reference object to the object given by the vertices. In particular, if again the mapping from reference object to the object under consideration here is linear (not bi- or trilinear), then the returned vectors are all parallel, perpendicular to the mapped object described by the vertices, and have a magnitude equal to the area/volume of the mapped object. If dim=1, spacedim=2, then the returned value is $\mathbf v_1^\perp$, where $\mathbf v_1$ is the image of the sole unit vector of a line mapped to the line in 2d given by the vertices; if dim=2, spacedim=3, then the returned values are $\mathbf v_1 \wedge \mathbf v_2=\mathbf v_1 \times \mathbf v_2$ where $\mathbf v_1,\mathbf v_2$ are the two three-dimensional vectors that are tangential to the quad mapped into three-dimensional space.

    This function is used in order to determine how distorted a cell is (see the entry on distorted cells in the glossary).

    /usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1AdditionalQGeneratorData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1AdditionalQGeneratorData.html 2024-11-15 06:44:34.035712717 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1AdditionalQGeneratorData.html 2024-11-15 06:44:34.035712717 +0000 @@ -216,7 +216,7 @@
    For a level set function, $\psi$, the implicit function theorem states that it is possible to write one of the coordinates $x_i$ as a function of the others if

    $|\frac{\partial \psi}{\partial x_i}| > 0$.

    In practice, the bound we have for the expression in the left-hand side may be near but not equal to zero due to roundoff errors.

    This constant is a safety margin, $C$, that states that the implicit function theorem can be used when $|\frac{\partial \psi}{\partial x_i}| > C$.

    @@ -255,7 +255,7 @@
    A constant, $C$, controlling when a level set function, $\psi$, is considered positive or negative definite:

    $\psi(x) >  C \Rightarrow \text{Positive definite}$, $\psi(x) < -C \Rightarrow \text{Negative definite}$.

    Definition at line 110 of file quadrature_generator.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1RegionUpdateFlags.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1RegionUpdateFlags.html 2024-11-15 06:44:34.051712860 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1RegionUpdateFlags.html 2024-11-15 06:44:34.051712860 +0000 @@ -129,14 +129,14 @@  

    Detailed Description

    Struct storing UpdateFlags for the 3 regions of a cell, $K$, that is defined by the sign of a level set function, $\psi$:

    \[
    N = \{x \in K : \psi(x) < 0 \}, \\
    P = \{x \in K : \psi(x) > 0 \}, \\
    S = \{x \in K : \psi(x) = 0 \}.
    \]

    As in the QuadratureGenerator class, we refer to $N$, $P$ and $S$ as the inside, outside, and surface region. RegionUpdateFlags is used to describe how the FEValues objects, which are created by NonMatching::FEValues, should be updated.

    Definition at line 57 of file fe_values.h.
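    For illustration, a sketch of filling these flags per region, assuming the member names inside, outside, and surface documented below:

    #include <deal.II/fe/fe_update_flags.h>
    #include <deal.II/non_matching/fe_values.h>

    dealii::NonMatching::RegionUpdateFlags make_region_flags()
    {
      dealii::NonMatching::RegionUpdateFlags flags;
      flags.inside  = dealii::update_values | dealii::update_JxW_values; // N
      flags.outside = dealii::update_default;                            // P
      flags.surface = dealii::update_values | dealii::update_JxW_values |
                      dealii::update_normal_vectors;                     // S
      return flags;
    }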

    Constructor & Destructor Documentation

    @@ -172,7 +172,7 @@
    Flags for the region $\{x \in K : \psi(x) < 0 \}$

    Definition at line 67 of file fe_values.h.

    @@ -189,7 +189,7 @@
    Flags for the region $\{x \in K : \psi(x) > 0 \}$

    Definition at line 72 of file fe_values.h.

    @@ -206,7 +206,7 @@
    Flags for the region $\{x \in K : \psi(x) = 0 \}$

    Definition at line 77 of file fe_values.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1HeightDirectionData.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1HeightDirectionData.html 2024-11-15 06:44:34.063712967 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structNonMatching_1_1internal_1_1QuadratureGeneratorImplementation_1_1HeightDirectionData.html 2024-11-15 06:44:34.063712967 +0000 @@ -129,10 +129,10 @@

    Detailed Description

    Data representing the best choice of height-function direction, which is returned by the function find_best_height_direction.

    This data consists of a coordinate direction

    $i \in \{0, ..., dim - 1 \}$,

    and a lower bound on the absolute value of the derivative of some associated function, f, taken in the above coordinate direction. That is, a bound $C$ such that

    $|\frac{\partial f}{\partial x_i}| > C$,

    holding over some subset of $\mathbb{R}^{dim}$.

    Definition at line 999 of file quadrature_generator.h.

    Constructor & Destructor Documentation

    @@ -185,7 +185,7 @@
    The lower bound on $|\frac{\partial f}{\partial x_i}|$, described above.

    Definition at line 1017 of file quadrature_generator.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/structProductType.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/structProductType.html 2024-11-15 06:44:34.083713146 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structProductType.html 2024-11-15 06:44:34.083713146 +0000 @@ -145,10 +145,10 @@
    auto product = t*u;

    The local alias of this structure represents the type the variable product would have.

    Where is this useful

    The purpose of this class is principally to represent the type one needs to use to represent the values or gradients of finite element fields at quadrature points. For example, assume you are storing the values $U_j$ of unknowns in a Vector<float>, then evaluating $u_h(x_q) = \sum_j U_j \varphi_j(x_q)$ at quadrature points results in values $u_h(x_q)$ that need to be stored as double variables because the $U_j$ are float values and the $\varphi_j(x_q)$ are computed as double values, and the products are then double values. On the other hand, if you store your unknowns $U_j$ as std::complex<double> values and you try to evaluate $\nabla u_h(x_q) = \sum_j U_j \nabla\varphi_j(x_q)$ at quadrature points, then the gradients $\nabla u_h(x_q)$ need to be stored as objects of type Tensor<1,dim,std::complex<double>> because that's what you get when you multiply a complex number by a Tensor<1,dim> (the type used to represent the gradient of shape functions of scalar finite elements).

    Likewise, if you are using a vector valued element (with dim components) and the $U_j$ are stored as double variables, then $u_h(x_q) = \sum_j U_j \varphi_j(x_q)$ needs to have type Tensor<1,dim> (because the shape functions have type Tensor<1,dim>). Finally, if you store the $U_j$ as objects of type std::complex<double> and you have a vector valued element, then the gradients $\nabla u_h(x_q) = \sum_j U_j \nabla\varphi_j(x_q)$ will result in objects of type Tensor<2,dim,std::complex<double>>.

    In all of these cases, this type is used to identify which type needs to be used for the result of computing the product of unknowns and the values, gradients, or other properties of shape functions.
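    Both of the statements above can be checked at compile time; a short sketch using the trait (C++17 for std::is_same_v):

    #include <deal.II/base/template_constraints.h>
    #include <deal.II/base/tensor.h>

    #include <complex>
    #include <type_traits>

    // float coefficients times double shape values -> double:
    static_assert(
      std::is_same_v<dealii::ProductType<float, double>::type, double>);

    // complex coefficients times Tensor<1,dim> gradients
    // -> Tensor<1,dim,std::complex<double>>:
    static_assert(
      std::is_same_v<
        dealii::ProductType<std::complex<double>, dealii::Tensor<1, 3>>::type,
        dealii::Tensor<1, 3, std::complex<double>>>);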

    Definition at line 458 of file template_constraints.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/structSUNDIALS_1_1SundialsPreconditioner.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/structSUNDIALS_1_1SundialsPreconditioner.html 2024-11-15 06:44:34.099713289 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structSUNDIALS_1_1SundialsPreconditioner.html 2024-11-15 06:44:34.099713289 +0000 @@ -199,7 +199,7 @@ const VectorType & src

    Apply the wrapped preconditioner, i.e., solve $Px=b$ where $x$ is the dst vector and $b$ the src vector.

    Parameters
    dst: Result vector of the preconditioner application

    /usr/share/doc/packages/dealii/doxygen/deal.II/structSynchronousIterators.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/structSynchronousIterators.html 2024-11-15 06:44:34.115713432 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/structSynchronousIterators.html 2024-11-15 06:44:34.115713432 +0000 @@ -339,7 +339,7 @@
    Advance a tuple of iterators by $n$.

    Definition at line 147 of file synchronous_iterator.h.

    @@ -369,7 +369,7 @@
    Advance a tuple of iterators by $n$.

    Definition at line 160 of file synchronous_iterator.h.

    @@ -399,7 +399,7 @@
    Advance a tuple of iterators by $n$.

    Definition at line 174 of file synchronous_iterator.h.

    @@ -519,7 +519,7 @@
    Advance the elements of this iterator by $n$.

    Definition at line 235 of file synchronous_iterator.h.
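    A sketch of advancing two containers' iterators in lock-step, assuming the advancing operation documented above is exposed as operator+:

    #include <deal.II/base/synchronous_iterator.h>

    #include <tuple>
    #include <vector>

    int main()
    {
      std::vector<int>    a(10, 1);
      std::vector<double> b(10, 2.0);

      using Its = std::tuple<std::vector<int>::iterator,
                             std::vector<double>::iterator>;
      const dealii::SynchronousIterators<Its> begin(Its(a.begin(), b.begin()));

      // Advance both member iterators together by n = 3.
      const dealii::SynchronousIterators<Its> fourth = begin + 3;
      (void)fourth;
    }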

    /usr/share/doc/packages/dealii/doxygen/deal.II/symmetric__tensor_8h.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/symmetric__tensor_8h.html 2024-11-15 06:44:34.155713789 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/symmetric__tensor_8h.html 2024-11-15 06:44:34.155713789 +0000 @@ -379,7 +379,7 @@
    Return the fourth-order symmetric identity tensor $\mathbb S$ which maps symmetric second-order tensors, such as $\mathbf A$, to themselves.

    \[
      \mathbb S : \mathbf A = \mathbf A
    \]

    @@ -870,7 +870,7 @@

    Compute the second invariant of a tensor of rank 2. The second invariant of a tensor $\mathbf A$ is defined as $I_2 (\mathbf A) = II(\mathbf A) = \frac 12 \left[ (\text{tr} \mathbf A)^2 - \text{tr} (\mathbf{A}^2) \right]$.

    For the kind of arguments to this function, i.e., a rank-2 tensor of size 1, the result is simply zero.

    @@ -902,11 +902,11 @@
    Compute the second invariant of a tensor of rank 2. The second invariant of a tensor $\mathbf A$ is defined as $I_2 (\mathbf A) = II(\mathbf A) = \frac 12 \left[ (\text{tr} \mathbf A)^2 - \text{tr} (\mathbf{A}^2) \right]$.

    For the kind of arguments to this function, i.e., a symmetric rank-2 tensor of size 2, the result is (counting indices starting at one) $I_2(\mathbf A) = II(\mathbf A) = \frac 12 \left[ (A_{11} + A_{22})^2 - (A_{11}^2+2 A_{12}^2+ A_{22}^2) \right] = A_{11} A_{22} - A_{12}^2$. As expected, for the $2\times 2$ symmetric tensors this function handles, this equals the determinant of the tensor. (This is so because for $2\times 2$ symmetric tensors, there really are only two invariants, so the second and third invariant are the same; the determinant is the third invariant.)

    Definition at line 2841 of file symmetric_tensor.h.
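    A quick numeric sketch of the size-2 statement above, using second_invariant() and determinant() on a SymmetricTensor<2,2>:

    #include <deal.II/base/symmetric_tensor.h>

    #include <iostream>

    int main()
    {
      dealii::SymmetricTensor<2, 2> A;
      A[0][0] = 2.;
      A[1][1] = 3.;
      A[0][1] = 1.; // symmetric storage: A[1][0] is the same entry

      // I_2(A) = A_11 A_22 - A_12^2 = 2*3 - 1 = 5, equal to det(A).
      std::cout << dealii::second_invariant(A) << " == "
                << dealii::determinant(A) << '\n';
    }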

    @@ -936,7 +936,7 @@
    Compute the second invariant of a tensor of rank 2. The second invariant of a tensor $\mathbf A$ is defined as $I_2 (\mathbf A) = II(\mathbf A) = \frac 12 \left[ (\text{tr} \mathbf A)^2 - \text{tr} (\mathbf{A}^2) \right]$.

    Definition at line 2858 of file symmetric_tensor.h.

    @@ -979,8 +979,8 @@
    Return the eigenvalues of a symmetric $2\times 2$ tensor. The array of eigenvalues is sorted in descending order.

    For $2\times 2$ tensors, the eigenvalues of tensor $\mathbf T$ are the roots of the characteristic polynomial $0 = \lambda^2 - \lambda\;\text{tr}\mathbf{T} + \det \mathbf{T}$ as given by $\lambda_1, \lambda_2 = \frac{1}{2} \left[ \text{tr} \mathbf{T} \pm \sqrt{(\text{tr} \mathbf{T})^2 - 4 \det \mathbf{T}} \right]$.

    Warning
    The algorithm employed here determines the eigenvalues by computing the roots of the characteristic polynomial. In the case that there exists a common root (the eigenvalues are equal), the computation is subject to round-off errors of order $\sqrt{\epsilon}$. As an alternative, the eigenvectors() function provides a more robust, but costly, method to compute the eigenvalues of a symmetric tensor.
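    A numeric sketch of the formula above (eigenvalues() applied to a diagonal $2\times 2$ tensor):

    #include <deal.II/base/symmetric_tensor.h>

    #include <array>
    #include <iostream>

    int main()
    {
      dealii::SymmetricTensor<2, 2> T;
      T[0][0] = 3.;
      T[1][1] = 1.; // tr T = 4, det T = 3

      // lambda_{1,2} = (4 +- sqrt(16 - 12)) / 2 = 3, 1 (descending order)
      const std::array<double, 2> lambda = dealii::eigenvalues(T);
      std::cout << lambda[0] << ' ' << lambda[1] << '\n';
    }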
    /usr/share/doc/packages/dealii/doxygen/deal.II/synchronous__iterator_8h.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/synchronous__iterator_8h.html 2024-11-15 06:44:34.175713968 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/synchronous__iterator_8h.html 2024-11-15 06:44:34.175713968 +0000 @@ -233,7 +233,7 @@
    Advance a tuple of iterators by $n$.

    Definition at line 147 of file synchronous_iterator.h.

    @@ -263,7 +263,7 @@
    Advance a tuple of iterators by $n$.

    Definition at line 160 of file synchronous_iterator.h.

    @@ -293,7 +293,7 @@
    Advance a tuple of iterators by $n$.

    Definition at line 174 of file synchronous_iterator.h.

    @@ -413,7 +413,7 @@
    Advance the elements of this iterator by $n$.

    Definition at line 235 of file synchronous_iterator.h.

    /usr/share/doc/packages/dealii/doxygen/deal.II/tensor_8h.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/tensor_8h.html 2024-11-15 06:44:34.199714182 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/tensor_8h.html 2024-11-15 06:44:34.199714182 +0000 @@ -637,11 +637,11 @@

    Entrywise multiplication of two tensor objects of general rank.

    This multiplication is also called "Hadamard-product" (c.f. https://en.wikipedia.org/wiki/Hadamard_product_(matrices)), and generates a new tensor of size <rank, dim>:

    \[
      \text{result}_{i, j}
      = \text{left}_{i, j}\circ
        \text{right}_{i, j}
    \]
    Template Parameters
    @@ -678,17 +678,17 @@
    The dot product (single contraction) for tensors. This function returns a tensor of rank $(\text{rank}_1 + \text{rank}_2 - 2)$ that is the contraction of the last index of a tensor src1 of rank rank_1 with the first index of a tensor src2 of rank rank_2:

    \[
      \text{result}_{i_1,\ldots,i_{r1},j_1,\ldots,j_{r2}}
      = \sum_{k}
        \text{left}_{i_1,\ldots,i_{r1}, k}
        \text{right}_{k, j_1,\ldots,j_{r2}}
    \]

    Note
    For the Tensor class, the multiplication operator only performs a contraction over a single pair of indices. This is in contrast to the multiplication operator for SymmetricTensor, for which the corresponding operator*() performs a double contraction. The origin of the difference in how operator*() is implemented between Tensor and SymmetricTensor is that for the former, the product between two Tensor objects of same rank and dimension results in another Tensor object – that it, operator*() corresponds to the multiplicative group action within the group of tensors. On the other hand, there is no corresponding multiplicative group action with the set of symmetric tensors because, in general, the product of two symmetric tensors is a nonsymmetric tensor. As a consequence, for a mathematician, it is clear that operator*() for symmetric tensors must have a different meaning: namely the dot or scalar product that maps two symmetric tensors of rank 2 to a scalar. This corresponds to the double-dot (colon) operator whose meaning is then extended to the product of any two even-ranked symmetric tensors.
    In case the contraction yields a tensor of rank 0, that is, if rank_1==rank_2==1, then a scalar number is returned as an unwrapped number type.

    Return the $l_1$ norm of the given rank-2 tensor, where $\|\mathbf T\|_1 = \max_j \sum_i |T_{ij}|$ (maximum of the sums over columns).

    Definition at line 3039 of file tensor.h.
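    A sketch of the rank arithmetic of operator*: rank 2 times rank 1 contracts to rank $2+1-2=1$, and rank 1 times rank 1 yields an unwrapped scalar, as the note above says:

    #include <deal.II/base/tensor.h>

    int main()
    {
      dealii::Tensor<2, 3> A; // rank_1 == 2
      dealii::Tensor<1, 3> x; // rank_2 == 1
      A[0][0] = 1.;
      x[0]    = 2.;

      const dealii::Tensor<1, 3> y = A * x; // rank 2 + 1 - 2 == 1

      const double s = x * x; // rank_1 == rank_2 == 1: plain scalar, here 4.
      (void)y;
      (void)s;
    }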

    @@ -718,7 +718,7 @@
    Return the $l_\infty$ norm of the given rank-2 tensor, where $\|\mathbf T\|_\infty = \max_i \sum_j |T_{ij}|$ (maximum of the sums over rows).

    Definition at line 3065 of file tensor.h.
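    A numeric sketch of the two norms, l1_norm() (documented above) and linfty_norm():

    #include <deal.II/base/tensor.h>

    #include <iostream>

    int main()
    {
      dealii::Tensor<2, 2> T;
      T[0][0] = 1.; T[0][1] = -2.;
      T[1][0] = 3.; T[1][1] =  4.;

      // Column sums {4, 6} -> l1 norm 6; row sums {3, 7} -> linfty norm 7.
      std::cout << dealii::l1_norm(T) << ' ' << dealii::linfty_norm(T) << '\n';
    }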

    /usr/share/doc/packages/dealii/doxygen/deal.II/todo.html differs (HTML document, ASCII text, with very long lines) --- old//usr/share/doc/packages/dealii/doxygen/deal.II/todo.html 2024-11-15 06:44:34.215714325 +0000 +++ new//usr/share/doc/packages/dealii/doxygen/deal.II/todo.html 2024-11-15 06:44:34.215714325 +0000 @@ -171,9 +171,9 @@
    Class MeshWorker::Assembler::MGMatrixSimple< MatrixType >
    The matrix structures needed for assembling level matrices with local refinement and continuous elements are missing.
    Class MeshWorker::Assembler::ResidualLocalBlocksToGlobalBlocks< VectorType >
    Comprehensive model currently not implemented.
    Class MeshWorker::DoFInfoBox< dim, DOFINFO >
    Currently, we are storing an object for the cells and two for each face. We could gather all face data pertaining to the cell itself in one object, saving a bit of memory and a few operations, but sacrificing some cleanliness.
    Class MGTransferBase< VectorType >
    update the following documentation, since it does not reflect the latest changes in structure.
    Class PolynomialsAdini< dim >
    overalldiffered=1 (number of pkgs that are not bit-by-bit identical: 0 is good) overall=1